Commit 79ae084e authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master: (58 commits)
  amrnbdec: check frame size before decoding.
  cscd: use negative error values to indicate decode_init() failures.
  h264: prevent overreads in intra PCM decoding.
  FATE: do not decode audio in the nuv test.
  dxa: set audio stream time base using the sample rate
  psx-str: do not allow seeking by bytes
  asfdec: Do not set AVCodecContext.frame_size
  vqf: set packet parameters after av_new_packet()
  mpegaudiodec: use DSPUtil.butterflies_float().
  FATE: add mp3 test for sample that exhibited false overreads
  fate: add cdxl test for bit line plane arrangement
  vmnc: return error on decode_init() failure.
  libvorbis: add/update error messages
  libvorbis: use AVFifoBuffer for output packet buffer
  libvorbis: remove unneeded e_o_s check
  libvorbis: check return values for functions that can return errors
  libvorbis: use float input instead of s16
  libvorbis: do not flush libvorbis analysis if dsp state was not initialized
  libvorbis: use VBR by default, with default quality of 3
  libvorbis: fix use of minrate/maxrate AVOptions
  ...

Conflicts:
	Changelog
	doc/APIchanges
	libavcodec/avcodec.h
	libavcodec/dpxenc.c
	libavcodec/libvorbis.c
	libavcodec/vmnc.c
	libavformat/asfdec.c
	libavformat/id3v2enc.c
	libavformat/internal.h
	libavformat/mp3enc.c
	libavformat/utils.c
	libavformat/version.h
	libswscale/utils.c
	tests/fate/video.mak
	tests/ref/fate/nuv
	tests/ref/fate/prores-alpha
	tests/ref/lavf/ffm
	tests/ref/vsynth1/prores
	tests/ref/vsynth2/prores
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents a77c8ade 882abda5
@@ -8,6 +8,7 @@ version next:
- Apple ProRes encoder
- ffprobe -count_packets and -count_frames options
- Sun Rasterfile Encoder
- ID3v2 attached pictures reading and writing

version 0.10:
...
@@ -13,6 +13,7 @@ libavutil: 2011-04-18

API changes, most recent first:

<<<<<<< HEAD
2012-02-21 - xxxxxxx - lavc 54.4.100
  Add av_get_pcm_codec() function.
@@ -35,6 +36,13 @@ API changes, most recent first:
2012-01-24 - xxxxxxx - lavfi 2.60.100
  Add avfilter_graph_dump.
||||||| merged common ancestors
=======
2012-xx-xx - xxxxxxx - lavf 54.2.0 - avformat.h
  Add AVStream.attached_pic and AV_DISPOSITION_ATTACHED_PIC,
  used for dealing with attached pictures/cover art.
>>>>>>> qatar/master

2012-02-25 - c9bca80 - lavu 51.24.0 - error.h
  Add AVERROR_UNKNOWN
...
@@ -370,5 +370,39 @@ Wrap around segment index once it reaches @var{limit}.
ffmpeg -i in.mkv -c copy -map 0 -f segment -list out.list out%03d.nut
@end example
@section mp3

The MP3 muxer writes a raw MP3 stream with an ID3v2 header at the beginning and
optionally an ID3v1 tag at the end. ID3v2.3 and ID3v2.4 are supported; the
@code{id3v2_version} option controls which one is used. The legacy ID3v1 tag is
not written by default, but may be enabled with the @code{write_id3v1} option.

For seekable output the muxer also writes a Xing frame at the beginning, which
contains the number of frames in the file. It is useful for computing the
duration of VBR files.
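For instance, the duration reported by tools such as @command{ffprobe} for a
VBR file written this way is derived from that frame count:
@example
ffprobe out.mp3
@end example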
The muxer supports writing ID3v2 attached pictures (APIC frames). The pictures
are supplied to the muxer in the form of a video stream with a single packet.
There can be any number of those streams; each one corresponds to a single APIC
frame. The stream metadata tags @var{title} and @var{comment} map to APIC
@var{description} and @var{picture type} respectively. See
@url{http://id3.org/id3v2.4.0-frames} for allowed picture types.

Note that the APIC frames must be written at the beginning, so the muxer will
buffer the audio frames until it gets all the pictures. It is therefore advised
to provide the pictures as soon as possible to avoid excessive buffering.
Examples:
Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
@example
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
@end example
Attach a picture to an mp3:
@example
ffmpeg -i input.mp3 -i cover.png -c copy -metadata:s:v title="Album cover"
-metadata:s:v comment="Cover (Front)" out.mp3
@end example
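Since every attached-picture stream becomes its own APIC frame, several pictures
can be written in one run. A sketch, assuming an additional image @file{back.png}
and comment strings matching picture type names from the list linked above:
@example
ffmpeg -i input.mp3 -i cover.png -i back.png -map 0 -map 1 -map 2 -c copy
-metadata:s:v:0 title="Album cover" -metadata:s:v:0 comment="Cover (Front)"
-metadata:s:v:1 title="Back cover" -metadata:s:v:1 comment="Cover (Back)" out.mp3
@end example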
@c man end MUXERS
@@ -114,55 +114,6 @@ static VLC vlc_spectral[11];
static const char overread_err[] = "Input buffer exhausted before END element found\n";
static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
{
// For PCE based channel configurations map the channels solely based on tags.
if (!ac->m4ac.chan_config) {
return ac->tag_che_map[type][elem_id];
}
// For indexed channel configurations map the channels solely based on position.
switch (ac->m4ac.chan_config) {
case 7:
if (ac->tags_mapped == 3 && type == TYPE_CPE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
}
case 6:
/* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
if (ac->tags_mapped == tags_per_config[ac->m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
ac->tags_mapped++;
return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
}
case 5:
if (ac->tags_mapped == 2 && type == TYPE_CPE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
}
case 4:
if (ac->tags_mapped == 2 && ac->m4ac.chan_config == 4 && type == TYPE_SCE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
}
case 3:
case 2:
if (ac->tags_mapped == (ac->m4ac.chan_config != 2) && type == TYPE_CPE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
} else if (ac->m4ac.chan_config == 2) {
return NULL;
}
case 1:
if (!ac->tags_mapped && type == TYPE_SCE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][0];
}
default:
return NULL;
}
}
static int count_channels(uint8_t (*layout)[3], int tags)
{
    int i, sum = 0;
@@ -454,6 +405,90 @@ static void flush(AVCodecContext *avctx)
    }
}
/**
* Set up channel positions based on a default channel configuration
* as specified in table 1.17.
*
* @return Returns error status. 0 - OK, !0 - error
*/
static av_cold int set_default_channel_config(AVCodecContext *avctx,
uint8_t (*layout_map)[3],
int *tags,
int channel_config)
{
if (channel_config < 1 || channel_config > 7) {
av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
channel_config);
return -1;
}
*tags = tags_per_config[channel_config];
memcpy(layout_map, aac_channel_layout_map[channel_config-1], *tags * sizeof(*layout_map));
return 0;
}
static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
{
// For PCE based channel configurations map the channels solely based on tags.
if (!ac->m4ac.chan_config) {
return ac->tag_che_map[type][elem_id];
}
// Allow single CPE stereo files to be signalled with mono configuration.
if (!ac->tags_mapped && type == TYPE_CPE && ac->m4ac.chan_config == 1) {
uint8_t layout_map[MAX_ELEM_ID*4][3];
int layout_map_tags;
if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
2) < 0)
return NULL;
if (output_configure(ac, layout_map, layout_map_tags,
2, OC_TRIAL_FRAME) < 0)
return NULL;
ac->m4ac.chan_config = 2;
}
// For indexed channel configurations map the channels solely based on position.
switch (ac->m4ac.chan_config) {
case 7:
if (ac->tags_mapped == 3 && type == TYPE_CPE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
}
case 6:
/* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
if (ac->tags_mapped == tags_per_config[ac->m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
ac->tags_mapped++;
return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
}
case 5:
if (ac->tags_mapped == 2 && type == TYPE_CPE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
}
case 4:
if (ac->tags_mapped == 2 && ac->m4ac.chan_config == 4 && type == TYPE_SCE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
}
case 3:
case 2:
if (ac->tags_mapped == (ac->m4ac.chan_config != 2) && type == TYPE_CPE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
} else if (ac->m4ac.chan_config == 2) {
return NULL;
}
case 1:
if (!ac->tags_mapped && type == TYPE_SCE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][0];
}
default:
return NULL;
}
}
/**
 * Decode an array of 4 bit element IDs, optionally interleaved with a stereo/mono switching bit.
 *
@@ -550,27 +585,6 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
    return tags;
}
/**
* Set up channel positions based on a default channel configuration
* as specified in table 1.17.
*
* @return Returns error status. 0 - OK, !0 - error
*/
static av_cold int set_default_channel_config(AVCodecContext *avctx,
uint8_t (*layout_map)[3],
int *tags,
int channel_config)
{
if (channel_config < 1 || channel_config > 7) {
av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
channel_config);
return -1;
}
*tags = tags_per_config[channel_config];
memcpy(layout_map, aac_channel_layout_map[channel_config-1], *tags * sizeof(*layout_map));
return 0;
}
/**
 * Decode GA "General Audio" specific configuration; reference: table 4.1.
 *
...
@@ -2138,6 +2138,17 @@ static av_cold int validate_options(AC3EncodeContext *s)
    s->bit_alloc.sr_code = i % 3;
    s->bitstream_id = s->eac3 ? 16 : 8 + s->bit_alloc.sr_shift;
/* select a default bit rate if not set by the user */
if (!avctx->bit_rate) {
switch (s->fbw_channels) {
case 1: avctx->bit_rate = 96000; break;
case 2: avctx->bit_rate = 192000; break;
case 3: avctx->bit_rate = 320000; break;
case 4: avctx->bit_rate = 384000; break;
case 5: avctx->bit_rate = 448000; break;
}
}
    /* validate bit rate */
    if (s->eac3) {
        int max_br, min_br, wpf, min_br_dist, min_br_code;
@@ -2186,15 +2197,20 @@ static av_cold int validate_options(AC3EncodeContext *s)
            wpf--;
        s->frame_size_min = 2 * wpf;
    } else {
+       int best_br = 0, best_code = 0, best_diff = INT_MAX;
        for (i = 0; i < 19; i++) {
-           if ((ff_ac3_bitrate_tab[i] >> s->bit_alloc.sr_shift)*1000 == avctx->bit_rate)
+           int br = (ff_ac3_bitrate_tab[i] >> s->bit_alloc.sr_shift) * 1000;
+           int diff = abs(br - avctx->bit_rate);
+           if (diff < best_diff) {
+               best_br = br;
+               best_code = i;
+               best_diff = diff;
+           }
+           if (!best_diff)
                break;
        }
-       if (i == 19) {
-           av_log(avctx, AV_LOG_ERROR, "invalid bit rate\n");
-           return AVERROR(EINVAL);
-       }
-       s->frame_size_code = i << 1;
+       avctx->bit_rate = best_br;
+       s->frame_size_code = best_code << 1;
        s->frame_size_min = 2 * ff_ac3_frame_size_tab[s->frame_size_code][s->bit_alloc.sr_code];
        s->num_blks_code = 0x3;
        s->num_blocks = 6;
...
@@ -157,4 +157,5 @@ AVCodec ff_ac3_fixed_encoder = {
    .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
    .priv_class = &ac3enc_class,
    .channel_layouts = ff_ac3_channel_layouts,
    .defaults = ac3_defaults,
};
@@ -155,5 +155,6 @@ AVCodec ff_ac3_encoder = {
    .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
    .priv_class = &ac3enc_class,
    .channel_layouts = ff_ac3_channel_layouts,
    .defaults = ac3_defaults,
};
#endif
@@ -20,6 +20,7 @@
 */
#include "libavutil/opt.h"
#include "internal.h"
#include "ac3.h"
#if AC3ENC_TYPE == AC3ENC_TYPE_AC3_FIXED
@@ -78,3 +79,8 @@ static const AVOption eac3_options[] = {
{"auto", "Selected by the Encoder", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_AUTO }, INT_MIN, INT_MAX, AC3ENC_PARAM, "cpl_start_band"},
{NULL}
};
static const AVCodecDefault ac3_defaults[] = {
{ "b", "0" },
{ NULL }
};
@@ -200,6 +200,10 @@ static enum Mode unpack_bitstream(AMRContext *p, const uint8_t *buf,
    p->bad_frame_indicator = !get_bits1(&gb); // quality bit
    skip_bits(&gb, 2); // two padding bits
if (mode >= N_MODES || buf_size < frame_sizes_nb[mode] + 1) {
return NO_DATA;
}
    if (mode < MODE_DTX)
        ff_amr_bit_reorder((uint16_t *) &p->frame, sizeof(AMRNBFrame), buf + 1,
                           amr_unpacking_bitmaps_per_mode[mode]);
@@ -947,6 +951,10 @@ static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
    buf_out = (float *)p->avframe.data[0];
    p->cur_frame_mode = unpack_bitstream(p, buf, buf_size);
if (p->cur_frame_mode == NO_DATA) {
av_log(avctx, AV_LOG_ERROR, "Corrupt bitstream\n");
return AVERROR_INVALIDDATA;
}
    if (p->cur_frame_mode == MODE_DTX) {
        av_log_missing_feature(avctx, "dtx mode", 0);
        av_log(avctx, AV_LOG_INFO, "Note: libopencore_amrnb supports dtx\n");
...
@@ -582,17 +582,19 @@ typedef struct RcOverride{
#define CODEC_FLAG_BITEXACT 0x00800000 ///< Use only bitexact stuff (except (I)DCT).
/* Fx : Flag for h263+ extra options */
#define CODEC_FLAG_AC_PRED 0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction
-#define CODEC_FLAG_CBP_RD 0x04000000 ///< Use rate distortion optimization for cbp.
-#define CODEC_FLAG_QP_RD 0x08000000 ///< Use rate distortion optimization for qp selectioon.
#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter
#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation
#define CODEC_FLAG_CLOSED_GOP 0x80000000
#define CODEC_FLAG2_FAST 0x00000001 ///< Allow non spec compliant speedup tricks.
-#define CODEC_FLAG2_STRICT_GOP 0x00000002 ///< Strictly enforce GOP size.
#define CODEC_FLAG2_NO_OUTPUT 0x00000004 ///< Skip bitstream encoding.
#define CODEC_FLAG2_LOCAL_HEADER 0x00000008 ///< Place global headers at every keyframe instead of in extradata.
#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format. DEPRECATED!!!!
+#if FF_API_MPV_GLOBAL_OPTS
+#define CODEC_FLAG_CBP_RD 0x04000000 ///< Use rate distortion optimization for cbp.
+#define CODEC_FLAG_QP_RD 0x08000000 ///< Use rate distortion optimization for qp selectioon.
+#define CODEC_FLAG2_STRICT_GOP 0x00000002 ///< Strictly enforce GOP size.
#define CODEC_FLAG2_SKIP_RD 0x00004000 ///< RD optimal MB level residual skipping
+#endif
#define CODEC_FLAG2_CHUNKS 0x00008000 ///< Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries.
#define CODEC_FLAG2_SHOW_ALL 0x00400000 ///< Show all frames before the first keyframe
@@ -1492,19 +1494,21 @@ typedef struct AVCodecContext {
    int b_frame_strategy;
+#if FF_API_MPV_GLOBAL_OPTS
    /**
     * luma single coefficient elimination threshold
     * - encoding: Set by user.
     * - decoding: unused
     */
-   int luma_elim_threshold;
+   attribute_deprecated int luma_elim_threshold;
    /**
     * chroma single coeff elimination threshold
     * - encoding: Set by user.
     * - decoding: unused
     */
-   int chroma_elim_threshold;
+   attribute_deprecated int chroma_elim_threshold;
+#endif
    /**
     * qscale offset between IP and B-frames
@@ -1735,13 +1739,15 @@ typedef struct AVCodecContext {
     */
    int inter_quant_bias;
+#if FF_API_COLOR_TABLE_ID
    /**
     * color table ID
     * - encoding: unused
     * - decoding: Which clrtable should be used for 8bit RGB images.
     *             Tables have to be stored somewhere. FIXME
     */
-   int color_table_id;
+   attribute_deprecated int color_table_id;
+#endif
    /**
     * slice flags
@@ -1799,19 +1805,19 @@ typedef struct AVCodecContext {
     */
    int noise_reduction;
+#if FF_API_INTER_THRESHOLD
    /**
-    *
-    * - encoding: Set by user.
-    * - decoding: unused
+    * @deprecated this field is unused
     */
-   int inter_threshold;
+   attribute_deprecated int inter_threshold;
+#endif
+#if FF_API_MPV_GLOBAL_OPTS
    /**
-    * quantizer noise shaping
-    * - encoding: Set by user.
-    * - decoding: unused
+    * @deprecated use mpegvideo private options instead
     */
-   int quantizer_noise_shaping;
+   attribute_deprecated int quantizer_noise_shaping;
+#endif
    /**
     * Motion estimation threshold below which no motion estimation is
...
@@ -228,7 +228,7 @@ static av_cold int decode_init(AVCodecContext *avctx) {
        av_log(avctx, AV_LOG_ERROR,
               "CamStudio codec error: invalid depth %i bpp\n",
               avctx->bits_per_coded_sample);
-       return 1;
+       return AVERROR_INVALIDDATA;
    }
    c->bpp = avctx->bits_per_coded_sample;
    avcodec_get_frame_defaults(&c->pic);
@@ -242,7 +242,7 @@ static av_cold int decode_init(AVCodecContext *avctx) {
    c->decomp_buf = av_malloc(c->decomp_size + AV_LZO_OUTPUT_PADDING);
    if (!c->decomp_buf) {
        av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
-       return 1;
+       return AVERROR(ENOMEM);
    }
    return 0;
}
...
@@ -131,9 +131,8 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
    memcpy (buf + 8, "V1.0", 4);
    write32(buf + 20, 1); /* new image */
    write32(buf + 24, HEADER_SIZE);
-   if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+   if (!(avctx->flags & CODEC_FLAG_BITEXACT))
        memcpy (buf + 160, LIBAVCODEC_IDENT, FFMIN(sizeof(LIBAVCODEC_IDENT), 100));
-   }
    write32(buf + 660, 0xFFFFFFFF); /* unencrypted */
    /* Image information header */
...
@@ -258,5 +258,6 @@ AVCodec ff_eac3_encoder = {
    .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52 E-AC-3"),
    .priv_class = &eac3enc_class,
    .channel_layouts = ff_ac3_channel_layouts,
    .defaults = ac3_defaults,
};
#endif
@@ -84,6 +84,8 @@ void ff_flv2_encode_ac_esc(PutBitContext *pb, int slevel, int level, int run, in
    }
}
FF_MPV_GENERIC_CLASS(flv)
AVCodec ff_flv_encoder = {
    .name = "flv",
    .type = AVMEDIA_TYPE_VIDEO,
@@ -94,4 +96,5 @@ AVCodec ff_flv_encoder = {
    .close = ff_MPV_encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
    .priv_class = &flv_class,
};
@@ -321,6 +321,8 @@ static void h261_encode_block(H261Context * h, DCTELEM * block, int n){
    }
}
FF_MPV_GENERIC_CLASS(h261)
AVCodec ff_h261_encoder = {
    .name = "h261",
    .type = AVMEDIA_TYPE_VIDEO,
@@ -331,4 +333,5 @@ AVCodec ff_h261_encoder = {
    .close = ff_MPV_encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name= NULL_IF_CONFIG_SMALL("H.261"),
    .priv_class = &h261_class,
};
@@ -148,7 +148,7 @@ static inline int get_p_cbp(MpegEncContext * s,
                            int motion_x, int motion_y){
    int cbp, i;
-   if(s->flags & CODEC_FLAG_CBP_RD){
+   if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
        int best_cbpy_score= INT_MAX;
        int best_cbpc_score= INT_MAX;
        int cbpc = (-1), cbpy= (-1);
...
@@ -1998,6 +1998,8 @@ decode_intra_mb:
        }
        // The pixels are stored in the same order as levels in h->mb array.
        if ((int) (h->cabac.bytestream_end - ptr) < mb_size)
            return -1;
        memcpy(h->mb, ptr, mb_size); ptr+=mb_size;
        ff_init_cabac_decoder(&h->cabac, ptr, h->cabac.bytestream_end - ptr);
...
This diff is collapsed.
@@ -939,6 +939,7 @@ static void mpeg1_encode_block(MpegEncContext *s,
static const AVOption mpeg1_options[] = {
    COMMON_OPTS
    FF_MPV_COMMON_OPTS
    { NULL },
};
@@ -946,6 +947,7 @@ static const AVOption mpeg2_options[] = {
    COMMON_OPTS
    { "non_linear_quant", "Use nonlinear quantizer.", OFFSET(q_scale_type), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
    { "alternate_scan", "Enable alternate scantable.", OFFSET(alternate_scan), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
    FF_MPV_COMMON_OPTS
    { NULL },
};
...
@@ -430,7 +430,7 @@ static inline int get_b_cbp(MpegEncContext * s, DCTELEM block[6][64],
{
    int cbp = 0, i;
-   if (s->flags & CODEC_FLAG_CBP_RD) {
+   if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
        int score = 0;
        const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
@@ -1330,6 +1330,7 @@ void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
static const AVOption options[] = {
    { "data_partitioning", "Use data partitioning.", OFFSET(data_partitioning), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
    { "alternate_scan", "Enable alternate scantable.", OFFSET(alternate_scan), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
    FF_MPV_COMMON_OPTS
    { NULL },
};
...
@@ -31,6 +31,7 @@
#include "get_bits.h"
#include "mathops.h"
#include "mpegaudiodsp.h"
#include "dsputil.h"
/*
 * TODO:
@@ -82,6 +83,7 @@ typedef struct MPADecodeContext {
    int err_recognition;
    AVCodecContext* avctx;
    MPADSPContext mpadsp;
    DSPContext dsp;
    AVFrame frame;
} MPADecodeContext;
@@ -434,6 +436,7 @@ static av_cold int decode_init(AVCodecContext * avctx)
    s->avctx = avctx;
    ff_mpadsp_init(&s->mpadsp);
    ff_dsputil_init(&s->dsp, avctx);
    avctx->sample_fmt= OUT_FMT;
    s->err_recognition = avctx->err_recognition;
@@ -1155,6 +1158,9 @@ found2:
            /* ms stereo ONLY */
            /* NOTE: the 1/sqrt(2) normalization factor is included in the
               global gain */
#if CONFIG_FLOAT
            s->dsp.butterflies_float(g0->sb_hybrid, g1->sb_hybrid, 576);
#else
            tab0 = g0->sb_hybrid;
            tab1 = g1->sb_hybrid;
            for (i = 0; i < 576; i++) {
@@ -1163,6 +1169,7 @@ found2:
                tab0[i] = tmp0 + tmp1;
                tab1[i] = tmp0 - tmp1;
            }
#endif
        }
    }
...
@@ -38,6 +38,8 @@
#include "rl.h"
#include "libavutil/timecode.h"
#include "libavutil/opt.h"
#define FRAME_SKIPPED 100 ///< return value for header parsers if frame is not coded
enum OutputFormat {
@@ -695,6 +697,9 @@ typedef struct MpegEncContext {
    int (*dct_quantize)(struct MpegEncContext *s, DCTELEM *block/*align 16*/, int n, int qscale, int *overflow);
    int (*fast_dct_quantize)(struct MpegEncContext *s, DCTELEM *block/*align 16*/, int n, int qscale, int *overflow);
    void (*denoise_dct)(struct MpegEncContext *s, DCTELEM *block);
int mpv_flags; ///< flags set by private options
int quantizer_noise_shaping;
} MpegEncContext;
#define REBASE_PICTURE(pic, new_ctx, old_ctx) (pic ? \
@@ -702,6 +707,36 @@ typedef struct MpegEncContext {
    &new_ctx->picture[pic - old_ctx->picture] : pic - (Picture*)old_ctx + (Picture*)new_ctx)\
    : NULL)
/* mpegvideo_enc common options */
#define FF_MPV_FLAG_SKIP_RD 0x0001
#define FF_MPV_FLAG_STRICT_GOP 0x0002
#define FF_MPV_FLAG_QP_RD 0x0004
#define FF_MPV_FLAG_CBP_RD 0x0008
#define FF_MPV_OFFSET(x) offsetof(MpegEncContext, x)
#define FF_MPV_OPT_FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
#define FF_MPV_COMMON_OPTS \
{ "mpv_flags", "Flags common for all mpegvideo-based encoders.", FF_MPV_OFFSET(mpv_flags), AV_OPT_TYPE_FLAGS, { 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "mpv_flags" },\
{ "skip_rd", "RD optimal MB level residual skipping", 0, AV_OPT_TYPE_CONST, { FF_MPV_FLAG_SKIP_RD }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
{ "strict_gop", "Strictly enforce gop size", 0, AV_OPT_TYPE_CONST, { FF_MPV_FLAG_STRICT_GOP }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
{ "qp_rd", "Use rate distortion optimization for qp selection", 0, AV_OPT_TYPE_CONST, { FF_MPV_FLAG_QP_RD }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
{ "cbp_rd", "use rate distortion optimization for CBP", 0, AV_OPT_TYPE_CONST, { FF_MPV_FLAG_CBP_RD }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
{ "luma_elim_threshold", "single coefficient elimination threshold for luminance (negative values also consider dc coefficient)",\
FF_MPV_OFFSET(luma_elim_threshold), AV_OPT_TYPE_INT, { 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS },\
{ "chroma_elim_threshold", "single coefficient elimination threshold for chrominance (negative values also consider dc coefficient)",\
FF_MPV_OFFSET(chroma_elim_threshold), AV_OPT_TYPE_INT, { 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS },\
{ "quantizer_noise_shaping", NULL, FF_MPV_OFFSET(quantizer_noise_shaping), AV_OPT_TYPE_INT, { 0 }, 0, INT_MAX, FF_MPV_OPT_FLAGS },
extern const AVOption ff_mpv_generic_options[];
#define FF_MPV_GENERIC_CLASS(name) \
static const AVClass name ## _class = {\
.class_name = #name " encoder",\
.item_name = av_default_item_name,\
.option = ff_mpv_generic_options,\
.version = LIBAVUTIL_VERSION_INT,\
};
void ff_MPV_decode_defaults(MpegEncContext *s);
int ff_MPV_common_init(MpegEncContext *s);
void ff_MPV_common_end(MpegEncContext *s);
...
This diff is collapsed.
@@ -98,12 +98,16 @@ static const AVOption options[]={
{"global_header", "place global headers in extradata instead of every keyframe", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_GLOBAL_HEADER }, INT_MIN, INT_MAX, V|A|E, "flags"},
{"bitexact", "use only bitexact stuff (except (i)dct)", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_BITEXACT }, INT_MIN, INT_MAX, A|V|S|D|E, "flags"},
{"aic", "h263 advanced intra coding / mpeg4 ac prediction", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_AC_PRED }, INT_MIN, INT_MAX, V|E, "flags"},
-{"cbp", "use rate distortion optimization for cbp", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_CBP_RD }, INT_MIN, INT_MAX, V|E, "flags"},
-{"qprd", "use rate distortion optimization for qp selection", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_QP_RD }, INT_MIN, INT_MAX, V|E, "flags"},
+#if FF_API_MPV_GLOBAL_OPTS
+{"cbp", "Deprecated, use mpegvideo private options instead", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_CBP_RD }, INT_MIN, INT_MAX, V|E, "flags"},
+{"qprd", "Deprecated, use mpegvideo private options instead", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_QP_RD }, INT_MIN, INT_MAX, V|E, "flags"},
+#endif
{"ilme", "interlaced motion estimation", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_INTERLACED_ME }, INT_MIN, INT_MAX, V|E, "flags"},
{"cgop", "closed gop", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_CLOSED_GOP }, INT_MIN, INT_MAX, V|E, "flags"},
{"fast", "allow non spec compliant speedup tricks", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_FAST }, INT_MIN, INT_MAX, V|E, "flags2"},
-{"sgop", "strictly enforce gop size", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_STRICT_GOP }, INT_MIN, INT_MAX, V|E, "flags2"},
+#if FF_API_MPV_GLOBAL_OPTS
+{"sgop", "Deprecated, use mpegvideo private options instead", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_STRICT_GOP }, INT_MIN, INT_MAX, V|E, "flags2"},
+#endif
{"noout", "skip bitstream encoding", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_NO_OUTPUT }, INT_MIN, INT_MAX, V|E, "flags2"},
{"local_header", "place global headers at every keyframe instead of in extradata", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_LOCAL_HEADER }, INT_MIN, INT_MAX, V|E, "flags2"},
{"showall", "Show all frames before the first keyframe", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_SHOW_ALL }, INT_MIN, INT_MAX, V|D, "flags2"},
@@ -167,8 +171,10 @@ static const AVOption options[]={
{"dc_clip", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_BUG_DC_CLIP }, INT_MIN, INT_MAX, V|D, "bug"},
{"ms", "workaround various bugs in microsofts broken decoders", 0, AV_OPT_TYPE_CONST, {.dbl = FF_BUG_MS }, INT_MIN, INT_MAX, V|D, "bug"},
{"trunc", "trancated frames", 0, AV_OPT_TYPE_CONST, {.dbl = FF_BUG_TRUNCATED}, INT_MIN, INT_MAX, V|D, "bug"},
#if FF_API_MPV_GLOBAL_OPTS
{"lelim", "single coefficient elimination threshold for luminance (negative values also consider dc coefficient)", OFFSET(luma_elim_threshold), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"celim", "single coefficient elimination threshold for chrominance (negative values also consider dc coefficient)", OFFSET(chroma_elim_threshold), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
#endif
{"strict", "how strictly to follow the standards", OFFSET(strict_std_compliance), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, A|V|D|E, "strict"},
{"very", "strictly conform to a older more strict version of the spec or reference software", 0, AV_OPT_TYPE_CONST, {.dbl = FF_COMPLIANCE_VERY_STRICT }, INT_MIN, INT_MAX, V|D|E, "strict"},
{"strict", "strictly conform to all the things in the spec no matter what consequences", 0, AV_OPT_TYPE_CONST, {.dbl = FF_COMPLIANCE_STRICT }, INT_MIN, INT_MAX, V|D|E, "strict"},
@@ -194,8 +200,8 @@ static const AVOption options[]={
{"rc_qmod_freq", "experimental quantizer modulation", OFFSET(rc_qmod_freq), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"rc_override_count", NULL, OFFSET(rc_override_count), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
{"rc_eq", "set rate control equation", OFFSET(rc_eq), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, V|E},
-{"maxrate", "set max video bitrate tolerance (in bits/s)", OFFSET(rc_max_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
-{"minrate", "set min video bitrate tolerance (in bits/s)", OFFSET(rc_min_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"maxrate", "set max bitrate tolerance (in bits/s)", OFFSET(rc_max_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
+{"minrate", "set min bitrate tolerance (in bits/s)", OFFSET(rc_min_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
{"bufsize", "set ratecontrol buffer size (in bits)", OFFSET(rc_buffer_size), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, A|V|E},
{"rc_buf_aggressivity", "currently useless", OFFSET(rc_buffer_aggressivity), AV_OPT_TYPE_FLOAT, {.dbl = 1.0 }, -FLT_MAX, FLT_MAX, V|E},
{"i_qfactor", "qp factor between P and I frames", OFFSET(i_quant_factor), AV_OPT_TYPE_FLOAT, {.dbl = -0.8 }, -FLT_MAX, FLT_MAX, V|E},
@@ -296,7 +302,9 @@ static const AVOption options[]={
{"me_range", "limit motion vectors range (1023 for DivX player)", OFFSET(me_range), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"ibias", "intra quant bias", OFFSET(intra_quant_bias), AV_OPT_TYPE_INT, {.dbl = FF_DEFAULT_QUANT_BIAS }, INT_MIN, INT_MAX, V|E},
{"pbias", "inter quant bias", OFFSET(inter_quant_bias), AV_OPT_TYPE_INT, {.dbl = FF_DEFAULT_QUANT_BIAS }, INT_MIN, INT_MAX, V|E},
#if FF_API_COLOR_TABLE_ID
{"color_table_id", NULL, OFFSET(color_table_id), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
#endif
{"global_quality", NULL, OFFSET(global_quality), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
{"coder", NULL, OFFSET(coder_type), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E, "coder"},
{"vlc", "variable length coder / huffman coder", 0, AV_OPT_TYPE_CONST, {.dbl = FF_CODER_TYPE_VLC }, INT_MIN, INT_MAX, V|E, "coder"},
@@ -317,10 +325,14 @@ static const AVOption options[]={
{"lmax", "max lagrange factor (VBR)", OFFSET(lmax), AV_OPT_TYPE_INT, {.dbl = 31*FF_QP2LAMBDA }, 0, INT_MAX, V|E},
{"nr", "noise reduction", OFFSET(noise_reduction), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"rc_init_occupancy", "number of bits which should be loaded into the rc buffer before decoding starts", OFFSET(rc_initial_buffer_occupancy), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
#if FF_API_INTER_THRESHOLD
{"inter_threshold", NULL, OFFSET(inter_threshold), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
#endif
{"flags2", NULL, OFFSET(flags2), AV_OPT_TYPE_FLAGS, {.dbl = DEFAULT}, 0, UINT_MAX, V|A|E|D, "flags2"},
{"error", NULL, OFFSET(error_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
-{"qns", "quantizer noise shaping", OFFSET(quantizer_noise_shaping), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+#if FF_API_MPV_GLOBAL_OPTS
+{"qns", "deprecated, use mpegvideo private options instead", OFFSET(quantizer_noise_shaping), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+#endif
{"threads", NULL, OFFSET(thread_count), AV_OPT_TYPE_INT, {.dbl = 1 }, 0, INT_MAX, V|E|D, "threads"},
{"auto", "detect a good number of threads", 0, AV_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, V|E|D, "threads"},
{"me_threshold", "motion estimaton threshold", OFFSET(me_threshold), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
@@ -366,7 +378,9 @@ static const AVOption options[]={
{"refs", "reference frames to consider for motion compensation", OFFSET(refs), AV_OPT_TYPE_INT, {.dbl = 1 }, INT_MIN, INT_MAX, V|E},
{"chromaoffset", "chroma qp offset from luma", OFFSET(chromaoffset), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"trellis", "rate-distortion optimal quantization", OFFSET(trellis), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
-{"skiprd", "RD optimal MB level residual skipping", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_SKIP_RD }, INT_MIN, INT_MAX, V|E, "flags2"},
+#if FF_API_MPV_GLOBAL_OPTS
+{"skiprd", "Deprecated, use mpegvideo private options instead", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_SKIP_RD }, INT_MIN, INT_MAX, V|E, "flags2"},
+#endif
{"sc_factor", "multiplied by qscale for each frame and added to scene_change_score", OFFSET(scenechange_factor), AV_OPT_TYPE_INT, {.dbl = 6 }, 0, INT_MAX, V|E},
{"mv0_threshold", NULL, OFFSET(mv0_threshold), AV_OPT_TYPE_INT, {.dbl = 256 }, 0, INT_MAX, V|E},
{"b_sensitivity", "adjusts sensitivity of b_frame_strategy 1", OFFSET(b_sensitivity), AV_OPT_TYPE_INT, {.dbl = 40 }, 1, INT_MAX, V|E},
...
@@ -165,6 +165,10 @@ static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
        ctx->picture.top_field_first = ctx->frame_type & 1;
    }
    avctx->color_primaries = buf[14];
    avctx->color_trc = buf[15];
    avctx->colorspace = buf[16];
    ctx->alpha_info = buf[17] & 0xf;
    if (ctx->alpha_info)
        av_log_missing_feature(avctx, "alpha channel", 0);
@@ -411,7 +415,7 @@ static void decode_slice_plane(ProresContext *ctx, ProresThreadData *td,
                               int data_size, uint16_t *out_ptr,
                               int linesize, int mbs_per_slice,
                               int blocks_per_mb, int plane_size_factor,
-                              const int16_t *qmat)
+                              const int16_t *qmat, int is_chroma)
{
    GetBitContext gb;
    DCTELEM *block_ptr;
@@ -431,18 +435,33 @@ static void decode_slice_plane(ProresContext *ctx, ProresThreadData *td,
    /* inverse quantization, inverse transform and output */
    block_ptr = td->blocks;
-   for (mb_num = 0; mb_num < mbs_per_slice; mb_num++, out_ptr += blocks_per_mb * 4) {
-       ctx->dsp.idct_put(out_ptr, linesize, block_ptr, qmat);
-       block_ptr += 64;
-       if (blocks_per_mb > 2) {
-           ctx->dsp.idct_put(out_ptr + 8, linesize, block_ptr, qmat);
-           block_ptr += 64;
-       }
-       ctx->dsp.idct_put(out_ptr + linesize * 4, linesize, block_ptr, qmat);
-       block_ptr += 64;
-       if (blocks_per_mb > 2) {
-           ctx->dsp.idct_put(out_ptr + linesize * 4 + 8, linesize, block_ptr, qmat);
-           block_ptr += 64;
-       }
-   }
+   if (!is_chroma) {
+       for (mb_num = 0; mb_num < mbs_per_slice; mb_num++, out_ptr += blocks_per_mb * 4) {
+           ctx->dsp.idct_put(out_ptr, linesize, block_ptr, qmat);
+           block_ptr += 64;
+           if (blocks_per_mb > 2) {
+               ctx->dsp.idct_put(out_ptr + 8, linesize, block_ptr, qmat);
+               block_ptr += 64;
+           }
+           ctx->dsp.idct_put(out_ptr + linesize * 4, linesize, block_ptr, qmat);
+           block_ptr += 64;
+           if (blocks_per_mb > 2) {
+               ctx->dsp.idct_put(out_ptr + linesize * 4 + 8, linesize, block_ptr, qmat);
+               block_ptr += 64;
+           }
+       }
+   } else {
+       for (mb_num = 0; mb_num < mbs_per_slice; mb_num++, out_ptr += blocks_per_mb * 4) {
+           ctx->dsp.idct_put(out_ptr, linesize, block_ptr, qmat);
+           block_ptr += 64;
+           ctx->dsp.idct_put(out_ptr + linesize * 4, linesize, block_ptr, qmat);
+           block_ptr += 64;
+           if (blocks_per_mb > 2) {
+               ctx->dsp.idct_put(out_ptr + 8, linesize, block_ptr, qmat);
+               block_ptr += 64;
+               ctx->dsp.idct_put(out_ptr + linesize * 4 + 8, linesize, block_ptr, qmat);
+               block_ptr += 64;
+           }
+       }
+   }
}
@@ -523,7 +542,7 @@ static int decode_slice(AVCodecContext *avctx, void *tdata)
                       (uint16_t*) (y_data + (mb_y_pos << 4) * y_linesize +
                       (mb_x_pos << 5)), y_linesize,
                       mbs_per_slice, 4, slice_width_factor + 2,
-                      td->qmat_luma_scaled);
+                      td->qmat_luma_scaled, 0);
    /* decode U chroma plane */
    decode_slice_plane(ctx, td, buf + hdr_size + y_data_size, u_data_size,
@@ -531,7 +550,7 @@ static int decode_slice(AVCodecContext *avctx, void *tdata)
                       (mb_x_pos << ctx->mb_chroma_factor)),
                       u_linesize, mbs_per_slice, ctx->num_chroma_blocks,
                       slice_width_factor + ctx->chroma_factor - 1,
-                      td->qmat_chroma_scaled);
+                      td->qmat_chroma_scaled, 1);
    /* decode V chroma plane */
    decode_slice_plane(ctx, td, buf + hdr_size + y_data_size + u_data_size,
@@ -540,7 +559,7 @@ static int decode_slice(AVCodecContext *avctx, void *tdata)
                       (mb_x_pos << ctx->mb_chroma_factor)),
                       v_linesize, mbs_per_slice, ctx->num_chroma_blocks,
                       slice_width_factor + ctx->chroma_factor - 1,
-                      td->qmat_chroma_scaled);
+                      td->qmat_chroma_scaled, 1);
    return 0;
}
...
@@ -171,7 +171,7 @@ typedef struct ProresContext {
static void get_slice_data(ProresContext *ctx, const uint16_t *src,
                           int linesize, int x, int y, int w, int h,
                           DCTELEM *blocks,
-                          int mbs_per_slice, int blocks_per_mb)
+                          int mbs_per_slice, int blocks_per_mb, int is_chroma)
{
    const uint16_t *esrc;
    const int mb_width = 4 * blocks_per_mb;
@@ -189,37 +189,50 @@ static void get_slice_data(ProresContext *ctx, const uint16_t *src,
            elinesize = linesize;
        } else {
            int bw, bh, pix;
-           const int estride = 16 / sizeof(*ctx->emu_buf);
            esrc = ctx->emu_buf;
-           elinesize = 16;
+           elinesize = 16 * sizeof(*ctx->emu_buf);
            bw = FFMIN(w - x, mb_width);
            bh = FFMIN(h - y, 16);
            for (j = 0; j < bh; j++) {
-               memcpy(ctx->emu_buf + j * estride, src + j * linesize,
+               memcpy(ctx->emu_buf + j * 16,
+                      (const uint8_t*)src + j * linesize,
                       bw * sizeof(*src));
-               pix = ctx->emu_buf[j * estride + bw - 1];
+               pix = ctx->emu_buf[j * 16 + bw - 1];
                for (k = bw; k < mb_width; k++)
-                   ctx->emu_buf[j * estride + k] = pix;
+                   ctx->emu_buf[j * 16 + k] = pix;
            }
            for (; j < 16; j++)
-               memcpy(ctx->emu_buf + j * estride,
-                      ctx->emu_buf + (bh - 1) * estride,
+               memcpy(ctx->emu_buf + j * 16,
+                      ctx->emu_buf + (bh - 1) * 16,
                       mb_width * sizeof(*ctx->emu_buf));
        }
-       ctx->dsp.fdct(esrc, elinesize, blocks);
-       blocks += 64;
-       if (blocks_per_mb > 2) {
-           ctx->dsp.fdct(src + 8, linesize, blocks);
-           blocks += 64;
-       }
-       ctx->dsp.fdct(src + linesize * 4, linesize, blocks);
-       blocks += 64;
-       if (blocks_per_mb > 2) {
-           ctx->dsp.fdct(src + linesize * 4 + 8, linesize, blocks);
-           blocks += 64;
-       }
+       if (!is_chroma) {
+           ctx->dsp.fdct(esrc, elinesize, blocks);
+           blocks += 64;
+           if (blocks_per_mb > 2) {
+               ctx->dsp.fdct(src + 8, linesize, blocks);
+               blocks += 64;
+           }
+           ctx->dsp.fdct(src + linesize * 4, linesize, blocks);
+           blocks += 64;
+           if (blocks_per_mb > 2) {
+               ctx->dsp.fdct(src + linesize * 4 + 8, linesize, blocks);
+               blocks += 64;
+           }
+       } else {
+           ctx->dsp.fdct(esrc, elinesize, blocks);
+           blocks += 64;
+           ctx->dsp.fdct(src + linesize * 4, linesize, blocks);
+           blocks += 64;
+           if (blocks_per_mb > 2) {
+               ctx->dsp.fdct(src + 8, linesize, blocks);
+               blocks += 64;
+               ctx->dsp.fdct(src + linesize * 4 + 8, linesize, blocks);
+               blocks += 64;
+           }
+       }

        x += mb_width;
@@ -383,7 +396,7 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic,
        get_slice_data(ctx, src, pic->linesize[i], xp, yp,
                       pwidth, avctx->height, ctx->blocks[0],
-                      mbs_per_slice, num_cblocks);
+                      mbs_per_slice, num_cblocks, is_chroma);
        sizes[i] = encode_slice_plane(ctx, pb, src, pic->linesize[i],
                                      mbs_per_slice, ctx->blocks[0],
                                      num_cblocks, plane_factor,
@@ -539,7 +552,7 @@ static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic,
        get_slice_data(ctx, src, pic->linesize[i], xp, yp,
                       pwidth, avctx->height, ctx->blocks[i],
-                      mbs_per_slice, num_cblocks[i]);
+                      mbs_per_slice, num_cblocks[i], is_chroma[i]);
    }
    for (q = min_quant; q < max_quant + 2; q++) {
...@@ -676,9 +689,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, ...@@ -676,9 +689,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
bytestream_put_be16 (&buf, avctx->height); bytestream_put_be16 (&buf, avctx->height);
bytestream_put_byte (&buf, ctx->chroma_factor << 6); // frame flags bytestream_put_byte (&buf, ctx->chroma_factor << 6); // frame flags
bytestream_put_byte (&buf, 0); // reserved bytestream_put_byte (&buf, 0); // reserved
bytestream_put_byte (&buf, 0); // primaries bytestream_put_byte (&buf, avctx->color_primaries);
bytestream_put_byte (&buf, 0); // transfer function bytestream_put_byte (&buf, avctx->color_trc);
bytestream_put_byte (&buf, 6); // colour matrix - ITU-R BT.601-4 bytestream_put_byte (&buf, avctx->colorspace);
bytestream_put_byte (&buf, 0x40); // source format and alpha information bytestream_put_byte (&buf, 0x40); // source format and alpha information
bytestream_put_byte (&buf, 0); // reserved bytestream_put_byte (&buf, 0); // reserved
bytestream_put_byte (&buf, 0x03); // matrix flags - both matrices are present bytestream_put_byte (&buf, 0x03); // matrix flags - both matrices are present
......
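Note on the ProRes encoder hunks above: the emulated-edge copy now treats linesize as a byte stride ((const uint8_t*)src + j * linesize) and sets elinesize in bytes to match, the new is_chroma flag makes luma macroblocks emit their 8x8 blocks in raster order while chroma macroblocks are emitted column-first, and the frame header now takes color_primaries/color_trc/colorspace from the encoder context instead of hardcoding BT.601. A minimal sketch of the edge-replication pattern, assuming a fixed 16x16 scratch block and bw, bh in 1..16 (the real code pads to mb_width rather than a constant 16):

#include <string.h>
#include <stdint.h>

/* Sketch only, not the encoder code: copy a partial bw x bh block into a
 * 16x16 buffer, then repeat the last column and last row to fill it. */
static void pad_block_16x16(uint16_t emu[16 * 16], const uint16_t *src,
                            int linesize /* in bytes */, int bw, int bh)
{
    int j, k;
    for (j = 0; j < bh; j++) {
        memcpy(emu + j * 16, (const uint8_t *)src + j * linesize,
               bw * sizeof(*src));
        for (k = bw; k < 16; k++)
            emu[j * 16 + k] = emu[j * 16 + bw - 1];   /* repeat last column */
    }
    for (; j < 16; j++)                                /* repeat last row */
        memcpy(emu + j * 16, emu + (bh - 1) * 16, 16 * sizeof(*emu));
}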
...@@ -424,7 +424,7 @@ static av_cold int qtrle_decode_init(AVCodecContext *avctx) ...@@ -424,7 +424,7 @@ static av_cold int qtrle_decode_init(AVCodecContext *avctx)
default: default:
av_log (avctx, AV_LOG_ERROR, "Unsupported colorspace: %d bits/sample?\n", av_log (avctx, AV_LOG_ERROR, "Unsupported colorspace: %d bits/sample?\n",
avctx->bits_per_coded_sample); avctx->bits_per_coded_sample);
break; return AVERROR_INVALIDDATA;
} }
avcodec_get_frame_defaults(&s->frame); avcodec_get_frame_defaults(&s->frame);
......
...@@ -183,6 +183,8 @@ static void rpza_decode_stream(RpzaContext *s) ...@@ -183,6 +183,8 @@ static void rpza_decode_stream(RpzaContext *s)
color4[1] |= ((11 * ta + 21 * tb) >> 5); color4[1] |= ((11 * ta + 21 * tb) >> 5);
color4[2] |= ((21 * ta + 11 * tb) >> 5); color4[2] |= ((21 * ta + 11 * tb) >> 5);
if (s->size - stream_ptr < n_blocks * 4)
return;
while (n_blocks--) { while (n_blocks--) {
block_ptr = row_ptr + pixel_ptr; block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) { for (pixel_y = 0; pixel_y < 4; pixel_y++) {
...@@ -200,6 +202,8 @@ static void rpza_decode_stream(RpzaContext *s) ...@@ -200,6 +202,8 @@ static void rpza_decode_stream(RpzaContext *s)
/* Fill block with 16 colors */ /* Fill block with 16 colors */
case 0x00: case 0x00:
if (s->size - stream_ptr < 16)
return;
block_ptr = row_ptr + pixel_ptr; block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) { for (pixel_y = 0; pixel_y < 4; pixel_y++) {
for (pixel_x = 0; pixel_x < 4; pixel_x++){ for (pixel_x = 0; pixel_x < 4; pixel_x++){
......
...@@ -56,6 +56,8 @@ void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number) ...@@ -56,6 +56,8 @@ void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
put_bits(&s->pb, 3, 0); /* ignored */ put_bits(&s->pb, 3, 0); /* ignored */
} }
FF_MPV_GENERIC_CLASS(rv10)
AVCodec ff_rv10_encoder = { AVCodec ff_rv10_encoder = {
.name = "rv10", .name = "rv10",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
...@@ -66,4 +68,5 @@ AVCodec ff_rv10_encoder = { ...@@ -66,4 +68,5 @@ AVCodec ff_rv10_encoder = {
.close = ff_MPV_encode_end, .close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"), .long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
.priv_class = &rv10_class,
}; };
...@@ -57,6 +57,8 @@ void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number){ ...@@ -57,6 +57,8 @@ void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number){
} }
} }
FF_MPV_GENERIC_CLASS(rv20)
AVCodec ff_rv20_encoder = { AVCodec ff_rv20_encoder = {
.name = "rv20", .name = "rv20",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
...@@ -67,4 +69,5 @@ AVCodec ff_rv20_encoder = { ...@@ -67,4 +69,5 @@ AVCodec ff_rv20_encoder = {
.close = ff_MPV_encode_end, .close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"), .long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
.priv_class = &rv20_class,
}; };
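FF_MPV_GENERIC_CLASS(rv10) and FF_MPV_GENERIC_CLASS(rv20) give the two RealVideo encoders an AVClass wired to the shared mpegvideo private options, which is what the regression-test changes further down rely on when they move -flags +qprd to -mpv_flags +qp_rd. Roughly, the macro expands to something like the following (a sketch of the mpegvideo.h definition of this period, not a verbatim copy):

/* Rough expansion of FF_MPV_GENERIC_CLASS(rv10); the real macro lives in
 * libavcodec/mpegvideo.h and points .option at ff_mpv_generic_options. */
static const AVClass rv10_class = {
    .class_name = "rv10 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
};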
...@@ -60,5 +60,14 @@ ...@@ -60,5 +60,14 @@
#ifndef FF_API_OLD_ENCODE_VIDEO #ifndef FF_API_OLD_ENCODE_VIDEO
#define FF_API_OLD_ENCODE_VIDEO (LIBAVCODEC_VERSION_MAJOR < 55) #define FF_API_OLD_ENCODE_VIDEO (LIBAVCODEC_VERSION_MAJOR < 55)
#endif #endif
#ifndef FF_API_MPV_GLOBAL_OPTS
#define FF_API_MPV_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
#ifndef FF_API_COLOR_TABLE_ID
#define FF_API_COLOR_TABLE_ID (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
#ifndef FF_API_INTER_THRESHOLD
#define FF_API_INTER_THRESHOLD (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
#endif /* AVCODEC_VERSION_H */ #endif /* AVCODEC_VERSION_H */
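The three new FF_API_* guards follow the usual libavcodec deprecation scheme: each macro stays true while LIBAVCODEC_VERSION_MAJOR is below 55, so the corresponding fields and options remain compiled in until the next major bump and drop out automatically afterwards. An illustrative (hypothetical) use of such a guard around a deprecated struct field:

/* Illustrative only -- the real deprecated fields live in avcodec.h. */
typedef struct ExampleContext {
#if FF_API_COLOR_TABLE_ID
    /* kept only until LIBAVCODEC_VERSION_MAJOR reaches 55 */
    attribute_deprecated int color_table_id;
#endif
    int other_field;
} ExampleContext;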
...@@ -484,7 +484,7 @@ static av_cold int decode_init(AVCodecContext *avctx) ...@@ -484,7 +484,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
break; break;
default: default:
av_log(avctx, AV_LOG_ERROR, "Unsupported bitdepth %i\n", c->bpp); av_log(avctx, AV_LOG_ERROR, "Unsupported bitdepth %i\n", c->bpp);
return AVERROR_PATCHWELCOME; return AVERROR_INVALIDDATA;
} }
return 0; return 0;
......
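This hunk and the qtrle change above both make decode_init() report unsupported parameters as AVERROR_INVALIDDATA instead of breaking out of the switch or returning AVERROR_PATCHWELCOME, so the failure surfaces cleanly at open time. A small sketch of the calling side, for context:

#include "libavcodec/avcodec.h"
#include "libavutil/error.h"

/* Sketch: a negative return from decode_init() propagates out of
 * avcodec_open2(), so the application fails early instead of feeding
 * data to a half-initialized decoder. */
static int open_decoder(AVCodecContext *avctx, AVCodec *codec)
{
    int ret = avcodec_open2(avctx, codec, NULL);
    if (ret < 0) {
        char err[128];
        av_strerror(ret, err, sizeof(err));
        av_log(avctx, AV_LOG_ERROR, "cannot open decoder: %s\n", err);
    }
    return ret;
}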
...@@ -57,6 +57,11 @@ static int vp5_parse_header(VP56Context *s, const uint8_t *buf, int buf_size, ...@@ -57,6 +57,11 @@ static int vp5_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
} }
rows = vp56_rac_gets(c, 8); /* number of stored macroblock rows */ rows = vp56_rac_gets(c, 8); /* number of stored macroblock rows */
cols = vp56_rac_gets(c, 8); /* number of stored macroblock cols */ cols = vp56_rac_gets(c, 8); /* number of stored macroblock cols */
if (!rows || !cols) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid size %dx%d\n",
cols << 4, rows << 4);
return 0;
}
vp56_rac_gets(c, 8); /* number of displayed macroblock rows */ vp56_rac_gets(c, 8); /* number of displayed macroblock rows */
vp56_rac_gets(c, 8); /* number of displayed macroblock cols */ vp56_rac_gets(c, 8); /* number of displayed macroblock cols */
vp56_rac_gets(c, 2); vp56_rac_gets(c, 2);
......
...@@ -77,6 +77,10 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size, ...@@ -77,6 +77,10 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
cols = buf[3]; /* number of stored macroblock cols */ cols = buf[3]; /* number of stored macroblock cols */
/* buf[4] is number of displayed macroblock rows */ /* buf[4] is number of displayed macroblock rows */
/* buf[5] is number of displayed macroblock cols */ /* buf[5] is number of displayed macroblock cols */
if (!rows || !cols) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid size %dx%d\n", cols << 4, rows << 4);
return 0;
}
if (!s->macroblocks || /* first frame */ if (!s->macroblocks || /* first frame */
16*cols != s->avctx->coded_width || 16*cols != s->avctx->coded_width ||
...@@ -97,7 +101,7 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size, ...@@ -97,7 +101,7 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
vrt_shift = 5; vrt_shift = 5;
s->sub_version = sub_version; s->sub_version = sub_version;
} else { } else {
if (!s->sub_version) if (!s->sub_version || !s->avctx->coded_width || !s->avctx->coded_height)
return 0; return 0;
if (separated_coeff || !s->filter_header) { if (separated_coeff || !s->filter_header) {
......
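Both VP5 and VP6 now reject a stored macroblock count of zero before it can reach the resize path, and VP6 additionally refuses to parse a non-keyframe while no valid coded size has been seen yet. The same guard as a standalone helper (sketch only; the in-tree parsers return 0 because that is how the vp56 header callbacks signal failure):

#include "libavutil/log.h"
#include "libavutil/error.h"

/* Sketch: a macroblock count of zero would otherwise propagate a 0x0
 * coded size into the decoder. */
static int check_mb_dimensions(void *log_ctx, int rows, int cols)
{
    if (!rows || !cols) {
        av_log(log_ctx, AV_LOG_ERROR, "Invalid size %dx%d\n",
               cols << 4, rows << 4);
        return AVERROR_INVALIDDATA;
    }
    return 0;
}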
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include "libavutil/avstring.h" #include "libavutil/avstring.h"
#include "libavutil/dict.h" #include "libavutil/dict.h"
#include "libavutil/mathematics.h" #include "libavutil/mathematics.h"
#include "libavcodec/mpegaudio.h"
#include "avformat.h" #include "avformat.h"
#include "internal.h" #include "internal.h"
#include "avio_internal.h" #include "avio_internal.h"
...@@ -199,6 +198,8 @@ static int asf_read_file_properties(AVFormatContext *s, int64_t size) ...@@ -199,6 +198,8 @@ static int asf_read_file_properties(AVFormatContext *s, int64_t size)
asf->hdr.flags = avio_rl32(pb); asf->hdr.flags = avio_rl32(pb);
asf->hdr.min_pktsize = avio_rl32(pb); asf->hdr.min_pktsize = avio_rl32(pb);
asf->hdr.max_pktsize = avio_rl32(pb); asf->hdr.max_pktsize = avio_rl32(pb);
if (asf->hdr.min_pktsize >= (1U<<29))
return AVERROR_INVALIDDATA;
asf->hdr.max_bitrate = avio_rl32(pb); asf->hdr.max_bitrate = avio_rl32(pb);
s->packet_size = asf->hdr.max_pktsize; s->packet_size = asf->hdr.max_pktsize;
...@@ -317,25 +318,6 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size) ...@@ -317,25 +318,6 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
|| asf_st->ds_packet_size % asf_st->ds_chunk_size) || asf_st->ds_packet_size % asf_st->ds_chunk_size)
asf_st->ds_span = 0; // disable descrambling asf_st->ds_span = 0; // disable descrambling
} }
switch (st->codec->codec_id) {
case CODEC_ID_MP3:
st->codec->frame_size = MPA_FRAME_SIZE;
break;
case CODEC_ID_PCM_S16LE:
case CODEC_ID_PCM_S16BE:
case CODEC_ID_PCM_U16LE:
case CODEC_ID_PCM_U16BE:
case CODEC_ID_PCM_S8:
case CODEC_ID_PCM_U8:
case CODEC_ID_PCM_ALAW:
case CODEC_ID_PCM_MULAW:
st->codec->frame_size = 1;
break;
default:
/* This is probably wrong, but it prevents a crash later */
st->codec->frame_size = 1;
break;
}
} else if (type == AVMEDIA_TYPE_VIDEO && } else if (type == AVMEDIA_TYPE_VIDEO &&
size - (avio_tell(pb) - pos1 + 24) >= 51) { size - (avio_tell(pb) - pos1 + 24) >= 51) {
avio_rl32(pb); avio_rl32(pb);
...@@ -612,7 +594,9 @@ static int asf_read_header(AVFormatContext *s) ...@@ -612,7 +594,9 @@ static int asf_read_header(AVFormatContext *s)
if (gsize < 24) if (gsize < 24)
return -1; return -1;
if (!ff_guidcmp(&g, &ff_asf_file_header)) { if (!ff_guidcmp(&g, &ff_asf_file_header)) {
asf_read_file_properties(s, gsize); int ret = asf_read_file_properties(s, gsize);
if (ret < 0)
return ret;
} else if (!ff_guidcmp(&g, &ff_asf_stream_header)) { } else if (!ff_guidcmp(&g, &ff_asf_stream_header)) {
asf_read_stream_properties(s, gsize); asf_read_stream_properties(s, gsize);
} else if (!ff_guidcmp(&g, &ff_asf_comment_header)) { } else if (!ff_guidcmp(&g, &ff_asf_comment_header)) {
......
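Three ASF demuxer cleanups land here: the per-codec frame_size guesses are dropped (AVCodecContext.frame_size is the decoder's business), min_pktsize gets a sanity bound so a corrupt file header cannot feed an absurd packet size into later allocations, and failures from asf_read_file_properties() now abort header parsing. A sketch of the bound being enforced, under the assumption that it merely has to stay far below INT_MAX once buffers are derived from it (1U << 29 is 512 MiB):

#include <stdint.h>
#include "libavutil/error.h"

/* Sketch: reject obviously corrupt ASF packet sizes early, well before
 * they reach any allocation or size arithmetic. */
static int validate_min_pktsize(uint32_t min_pktsize)
{
    if (min_pktsize >= (1U << 29))
        return AVERROR_INVALIDDATA;
    return 0;
}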
...@@ -541,6 +541,13 @@ typedef struct AVIndexEntry { ...@@ -541,6 +541,13 @@ typedef struct AVIndexEntry {
#define AV_DISPOSITION_HEARING_IMPAIRED 0x0080 /**< stream for hearing impaired audiences */ #define AV_DISPOSITION_HEARING_IMPAIRED 0x0080 /**< stream for hearing impaired audiences */
#define AV_DISPOSITION_VISUAL_IMPAIRED 0x0100 /**< stream for visual impaired audiences */ #define AV_DISPOSITION_VISUAL_IMPAIRED 0x0100 /**< stream for visual impaired audiences */
#define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */ #define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */
/**
* The stream is stored in the file as an attached picture/"cover art" (e.g.
* APIC frame in ID3v2). The single packet associated with it will be returned
* among the first few packets read from the file unless seeking takes place.
* It can also be accessed at any time in AVStream.attached_pic.
*/
#define AV_DISPOSITION_ATTACHED_PIC 0x0400
/** /**
* Stream structure. * Stream structure.
...@@ -615,6 +622,15 @@ typedef struct AVStream { ...@@ -615,6 +622,15 @@ typedef struct AVStream {
*/ */
AVRational avg_frame_rate; AVRational avg_frame_rate;
/**
* For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet
* will contain the attached picture.
*
* decoding: set by libavformat, must not be modified by the caller.
* encoding: unused
*/
AVPacket attached_pic;
/***************************************************************** /*****************************************************************
* All fields below this line are not part of the public API. They * All fields below this line are not part of the public API. They
* may not be used outside of libavformat and can be changed and * may not be used outside of libavformat and can be changed and
......
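For applications, the new disposition plus AVStream.attached_pic means cover art can be fetched without running a demux loop at all. A minimal consumer sketch (assumes the file was already opened with avformat_open_input()):

#include "libavformat/avformat.h"

/* Return the first attached picture, or NULL if none.  The packet is owned
 * by the AVStream and stays valid until the context is freed, so the caller
 * must not free or modify it. */
static const AVPacket *find_cover_art(AVFormatContext *fmt)
{
    unsigned int i;
    for (i = 0; i < fmt->nb_streams; i++) {
        AVStream *st = fmt->streams[i];
        if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
            return &st->attached_pic;   /* encoded image, e.g. PNG or MJPEG */
    }
    return NULL;
}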
...@@ -107,6 +107,8 @@ static int dxa_read_header(AVFormatContext *s) ...@@ -107,6 +107,8 @@ static int dxa_read_header(AVFormatContext *s)
ret = ff_get_wav_header(pb, ast->codec, fsize); ret = ff_get_wav_header(pb, ast->codec, fsize);
if (ret < 0) if (ret < 0)
return ret; return ret;
if (ast->codec->sample_rate > 0)
avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
// find 'data' chunk // find 'data' chunk
while(avio_tell(pb) < c->vidpos && !url_feof(pb)){ while(avio_tell(pb) < c->vidpos && !url_feof(pb)){
tag = avio_rl32(pb); tag = avio_rl32(pb);
......
...@@ -338,8 +338,6 @@ static int ffm_read_header(AVFormatContext *s) ...@@ -338,8 +338,6 @@ static int ffm_read_header(AVFormatContext *s)
codec->dct_algo = avio_rb32(pb); codec->dct_algo = avio_rb32(pb);
codec->strict_std_compliance = avio_rb32(pb); codec->strict_std_compliance = avio_rb32(pb);
codec->max_b_frames = avio_rb32(pb); codec->max_b_frames = avio_rb32(pb);
codec->luma_elim_threshold = avio_rb32(pb);
codec->chroma_elim_threshold = avio_rb32(pb);
codec->mpeg_quant = avio_rb32(pb); codec->mpeg_quant = avio_rb32(pb);
codec->intra_dc_precision = avio_rb32(pb); codec->intra_dc_precision = avio_rb32(pb);
codec->me_method = avio_rb32(pb); codec->me_method = avio_rb32(pb);
......
...@@ -144,8 +144,6 @@ static int ffm_write_header(AVFormatContext *s) ...@@ -144,8 +144,6 @@ static int ffm_write_header(AVFormatContext *s)
avio_wb32(pb, codec->dct_algo); avio_wb32(pb, codec->dct_algo);
avio_wb32(pb, codec->strict_std_compliance); avio_wb32(pb, codec->strict_std_compliance);
avio_wb32(pb, codec->max_b_frames); avio_wb32(pb, codec->max_b_frames);
avio_wb32(pb, codec->luma_elim_threshold);
avio_wb32(pb, codec->chroma_elim_threshold);
avio_wb32(pb, codec->mpeg_quant); avio_wb32(pb, codec->mpeg_quant);
avio_wb32(pb, codec->intra_dc_precision); avio_wb32(pb, codec->intra_dc_precision);
avio_wb32(pb, codec->me_method); avio_wb32(pb, codec->me_method);
......
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include "libavutil/intreadwrite.h" #include "libavutil/intreadwrite.h"
#include "libavutil/dict.h" #include "libavutil/dict.h"
#include "avio_internal.h" #include "avio_internal.h"
#include "internal.h"
const AVMetadataConv ff_id3v2_34_metadata_conv[] = { const AVMetadataConv ff_id3v2_34_metadata_conv[] = {
{ "TALB", "album"}, { "TALB", "album"},
...@@ -99,6 +100,38 @@ const char ff_id3v2_3_tags[][4] = { ...@@ -99,6 +100,38 @@ const char ff_id3v2_3_tags[][4] = {
{ 0 }, { 0 },
}; };
const char *ff_id3v2_picture_types[21] = {
"Other",
"32x32 pixels 'file icon'",
"Other file icon",
"Cover (front)",
"Cover (back)",
"Leaflet page",
"Media (e.g. label side of CD)",
"Lead artist/lead performer/soloist",
"Artist/performer",
"Conductor",
"Band/Orchestra",
"Composer",
"Lyricist/text writer",
"Recording Location",
"During recording",
"During performance",
"Movie/video screen capture",
"A bright coloured fish",
"Illustration",
"Band/artist logotype",
"Publisher/Studio logotype",
};
const CodecMime ff_id3v2_mime_tags[] = {
{"image/gif" , CODEC_ID_GIF},
{"image/jpeg", CODEC_ID_MJPEG},
{"image/png" , CODEC_ID_PNG},
{"image/tiff", CODEC_ID_TIFF},
{"", CODEC_ID_NONE},
};
int ff_id3v2_match(const uint8_t *buf, const char * magic) int ff_id3v2_match(const uint8_t *buf, const char * magic)
{ {
return buf[0] == magic[0] && return buf[0] == magic[0] &&
...@@ -394,6 +427,84 @@ finish: ...@@ -394,6 +427,84 @@ finish:
av_dict_set(m, "date", date, 0); av_dict_set(m, "date", date, 0);
} }
static void free_apic(void *obj)
{
ID3v2ExtraMetaAPIC *apic = obj;
av_freep(&apic->data);
av_freep(&apic->description);
av_freep(&apic);
}
static void read_apic(AVFormatContext *s, AVIOContext *pb, int taglen, char *tag, ID3v2ExtraMeta **extra_meta)
{
int enc, pic_type;
char mimetype[64];
const CodecMime *mime = ff_id3v2_mime_tags;
enum CodecID id = CODEC_ID_NONE;
ID3v2ExtraMetaAPIC *apic = NULL;
ID3v2ExtraMeta *new_extra = NULL;
int64_t end = avio_tell(pb) + taglen;
if (taglen <= 4)
goto fail;
new_extra = av_mallocz(sizeof(*new_extra));
apic = av_mallocz(sizeof(*apic));
if (!new_extra || !apic)
goto fail;
enc = avio_r8(pb);
taglen--;
/* mimetype */
taglen -= avio_get_str(pb, taglen, mimetype, sizeof(mimetype));
while (mime->id != CODEC_ID_NONE) {
if (!strncmp(mime->str, mimetype, sizeof(mimetype))) {
id = mime->id;
break;
}
mime++;
}
if (id == CODEC_ID_NONE) {
av_log(s, AV_LOG_WARNING, "Unknown attached picture mimetype: %s, skipping.\n", mimetype);
goto fail;
}
apic->id = id;
/* picture type */
pic_type = avio_r8(pb);
taglen--;
if (pic_type < 0 || pic_type >= FF_ARRAY_ELEMS(ff_id3v2_picture_types)) {
av_log(s, AV_LOG_WARNING, "Unknown attached picture type %d.\n", pic_type);
pic_type = 0;
}
apic->type = ff_id3v2_picture_types[pic_type];
/* description and picture data */
if (decode_str(s, pb, enc, &apic->description, &taglen) < 0) {
av_log(s, AV_LOG_ERROR, "Error decoding attached picture description.\n");
goto fail;
}
apic->len = taglen;
apic->data = av_malloc(taglen);
if (!apic->data || avio_read(pb, apic->data, taglen) != taglen)
goto fail;
new_extra->tag = "APIC";
new_extra->data = apic;
new_extra->next = *extra_meta;
*extra_meta = new_extra;
return;
fail:
if (apic)
free_apic(apic);
av_freep(&new_extra);
avio_seek(pb, end, SEEK_SET);
}
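read_apic() walks the standard APIC frame body: one text-encoding byte, a NUL-terminated MIME type, one picture-type byte, a NUL-terminated description, then the raw image data. A minimal parsing sketch over a flat buffer, assuming an ISO-8859-1 description (the real code uses decode_str() to handle all four ID3v2 text encodings):

#include <stdint.h>
#include <string.h>

/* Sketch of the APIC body layout parsed above.  Returns the offset of the
 * image data, or -1 if the buffer is too small. */
static int parse_apic_layout(const uint8_t *buf, int len,
                             const char **mime, int *pic_type,
                             const char **desc)
{
    const uint8_t *p = buf, *end = buf + len;

    if (len <= 4)
        return -1;
    p++;                                       /* text encoding byte      */
    *mime = (const char *)p;                   /* NUL-terminated MIME     */
    p += strnlen((const char *)p, end - p) + 1;
    if (p >= end)
        return -1;
    *pic_type = *p++;                          /* picture type index      */
    *desc = (const char *)p;                   /* NUL-terminated text     */
    p += strnlen((const char *)p, end - p) + 1;
    if (p > end)
        return -1;
    return (int)(p - buf);                     /* image data starts here  */
}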
typedef struct ID3v2EMFunc { typedef struct ID3v2EMFunc {
const char *tag3; const char *tag3;
const char *tag4; const char *tag4;
...@@ -403,6 +514,7 @@ typedef struct ID3v2EMFunc { ...@@ -403,6 +514,7 @@ typedef struct ID3v2EMFunc {
static const ID3v2EMFunc id3v2_extra_meta_funcs[] = { static const ID3v2EMFunc id3v2_extra_meta_funcs[] = {
{ "GEO", "GEOB", read_geobtag, free_geobtag }, { "GEO", "GEOB", read_geobtag, free_geobtag },
{ "PIC", "APIC", read_apic, free_apic },
{ NULL } { NULL }
}; };
...@@ -620,7 +732,7 @@ seek: ...@@ -620,7 +732,7 @@ seek:
return; return;
} }
void ff_id3v2_read_all(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **extra_meta) void ff_id3v2_read(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **extra_meta)
{ {
int len, ret; int len, ret;
uint8_t buf[ID3v2_HEADER_SIZE]; uint8_t buf[ID3v2_HEADER_SIZE];
...@@ -651,11 +763,6 @@ void ff_id3v2_read_all(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **e ...@@ -651,11 +763,6 @@ void ff_id3v2_read_all(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **e
merge_date(&s->metadata); merge_date(&s->metadata);
} }
void ff_id3v2_read(AVFormatContext *s, const char *magic)
{
ff_id3v2_read_all(s, magic, NULL);
}
void ff_id3v2_free_extra_meta(ID3v2ExtraMeta **extra_meta) void ff_id3v2_free_extra_meta(ID3v2ExtraMeta **extra_meta)
{ {
ID3v2ExtraMeta *current = *extra_meta, *next; ID3v2ExtraMeta *current = *extra_meta, *next;
...@@ -669,3 +776,37 @@ void ff_id3v2_free_extra_meta(ID3v2ExtraMeta **extra_meta) ...@@ -669,3 +776,37 @@ void ff_id3v2_free_extra_meta(ID3v2ExtraMeta **extra_meta)
current = next; current = next;
} }
} }
int ff_id3v2_parse_apic(AVFormatContext *s, ID3v2ExtraMeta **extra_meta)
{
ID3v2ExtraMeta *cur;
for (cur = *extra_meta; cur; cur = cur->next) {
ID3v2ExtraMetaAPIC *apic;
AVStream *st;
if (strcmp(cur->tag, "APIC"))
continue;
apic = cur->data;
if (!(st = avformat_new_stream(s, NULL)))
return AVERROR(ENOMEM);
st->disposition |= AV_DISPOSITION_ATTACHED_PIC;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = apic->id;
av_dict_set(&st->metadata, "title", apic->description, 0);
av_dict_set(&st->metadata, "comment", apic->type, 0);
av_init_packet(&st->attached_pic);
st->attached_pic.data = apic->data;
st->attached_pic.size = apic->len;
st->attached_pic.destruct = av_destruct_packet;
st->attached_pic.stream_index = st->index;
apic->data = NULL;
apic->len = 0;
}
return 0;
}
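Note the ownership handoff at the end of the loop above: the buffer read_apic() allocated becomes st->attached_pic (with av_destruct_packet as its destructor) and apic->data is cleared so that ff_id3v2_free_extra_meta() cannot free it a second time. The same move pattern in isolation:

#include "libavcodec/avcodec.h"

/* Sketch of the handoff used above: after the move, exactly one owner
 * (the packet) is responsible for freeing the buffer. */
static void move_buffer_into_packet(AVPacket *pkt, uint8_t **data, int *len)
{
    av_init_packet(pkt);
    pkt->data     = *data;
    pkt->size     = *len;
    pkt->destruct = av_destruct_packet;  /* packet now owns the buffer    */
    *data = NULL;                        /* previous owner must not free  */
    *len  = 0;
}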
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <stdint.h> #include <stdint.h>
#include "avformat.h" #include "avformat.h"
#include "internal.h"
#include "metadata.h" #include "metadata.h"
#define ID3v2_HEADER_SIZE 10 #define ID3v2_HEADER_SIZE 10
...@@ -45,6 +46,12 @@ enum ID3v2Encoding { ...@@ -45,6 +46,12 @@ enum ID3v2Encoding {
ID3v2_ENCODING_UTF8 = 3, ID3v2_ENCODING_UTF8 = 3,
}; };
typedef struct ID3v2EncContext {
int version; ///< ID3v2 minor version, either 3 or 4
int64_t size_pos; ///< offset of the tag total size
int len; ///< size of the tag written so far
} ID3v2EncContext;
typedef struct ID3v2ExtraMeta { typedef struct ID3v2ExtraMeta {
const char *tag; const char *tag;
void *data; void *data;
...@@ -59,6 +66,14 @@ typedef struct ID3v2ExtraMetaGEOB { ...@@ -59,6 +66,14 @@ typedef struct ID3v2ExtraMetaGEOB {
uint8_t *data; uint8_t *data;
} ID3v2ExtraMetaGEOB; } ID3v2ExtraMetaGEOB;
typedef struct ID3v2ExtraMetaAPIC {
uint8_t *data;
int len;
const char *type;
uint8_t *description;
enum CodecID id;
} ID3v2ExtraMetaAPIC;
/** /**
* Detect ID3v2 Header. * Detect ID3v2 Header.
* @param buf must be ID3v2_HEADER_SIZE byte long * @param buf must be ID3v2_HEADER_SIZE byte long
...@@ -75,24 +90,40 @@ int ff_id3v2_match(const uint8_t *buf, const char *magic); ...@@ -75,24 +90,40 @@ int ff_id3v2_match(const uint8_t *buf, const char *magic);
int ff_id3v2_tag_len(const uint8_t *buf); int ff_id3v2_tag_len(const uint8_t *buf);
/** /**
* Read an ID3v2 tag (text tags only) * Read an ID3v2 tag, including supported extra metadata
* @param extra_meta If not NULL, extra metadata is parsed into a list of
* ID3v2ExtraMeta structs and *extra_meta points to the head of the list
*/ */
void ff_id3v2_read(AVFormatContext *s, const char *magic); void ff_id3v2_read(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **extra_meta);
/** /**
* Read an ID3v2 tag, including supported extra metadata (currently only GEOB) * Initialize an ID3v2 tag.
* @param extra_meta If not NULL, extra metadata is parsed into a list of */
* ID3v2ExtraMeta structs and *extra_meta points to the head of the list void ff_id3v2_start(ID3v2EncContext *id3, AVIOContext *pb, int id3v2_version,
const char *magic);
/**
* Convert and write all global metadata from s into an ID3v2 tag.
*/
int ff_id3v2_write_metadata(AVFormatContext *s, ID3v2EncContext *id3);
/**
* Write an attached picture from pkt into an ID3v2 tag.
*/
int ff_id3v2_write_apic(AVFormatContext *s, ID3v2EncContext *id3, AVPacket *pkt);
/**
* Finalize an opened ID3v2 tag.
*/ */
void ff_id3v2_read_all(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **extra_meta); void ff_id3v2_finish(ID3v2EncContext *id3, AVIOContext *pb);
/** /**
* Write an ID3v2 tag. * Write an ID3v2 tag containing all global metadata from s.
* @param id3v2_version Subversion of ID3v2; supported values are 3 and 4 * @param id3v2_version Subversion of ID3v2; supported values are 3 and 4
* @param magic magic bytes to identify the header * @param magic magic bytes to identify the header
* If in doubt, use ID3v2_DEFAULT_MAGIC. * If in doubt, use ID3v2_DEFAULT_MAGIC.
*/ */
int ff_id3v2_write(struct AVFormatContext *s, int id3v2_version, const char *magic); int ff_id3v2_write_simple(struct AVFormatContext *s, int id3v2_version, const char *magic);
/** /**
* Free memory allocated parsing special (non-text) metadata. * Free memory allocated parsing special (non-text) metadata.
...@@ -100,6 +131,12 @@ int ff_id3v2_write(struct AVFormatContext *s, int id3v2_version, const char *mag ...@@ -100,6 +131,12 @@ int ff_id3v2_write(struct AVFormatContext *s, int id3v2_version, const char *mag
*/ */
void ff_id3v2_free_extra_meta(ID3v2ExtraMeta **extra_meta); void ff_id3v2_free_extra_meta(ID3v2ExtraMeta **extra_meta);
/**
* Create a stream for each APIC (attached picture) extracted from the
* ID3v2 header.
*/
int ff_id3v2_parse_apic(AVFormatContext *s, ID3v2ExtraMeta **extra_meta);
extern const AVMetadataConv ff_id3v2_34_metadata_conv[]; extern const AVMetadataConv ff_id3v2_34_metadata_conv[];
extern const AVMetadataConv ff_id3v2_4_metadata_conv[]; extern const AVMetadataConv ff_id3v2_4_metadata_conv[];
...@@ -120,4 +157,8 @@ extern const char ff_id3v2_4_tags[][4]; ...@@ -120,4 +157,8 @@ extern const char ff_id3v2_4_tags[][4];
*/ */
extern const char ff_id3v2_3_tags[][4]; extern const char ff_id3v2_3_tags[][4];
extern const CodecMime ff_id3v2_mime_tags[];
extern const char *ff_id3v2_picture_types[21];
#endif /* AVFORMAT_ID3V2_H */ #endif /* AVFORMAT_ID3V2_H */
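The writer API is now split into start / write_metadata / write_apic / finish so a muxer can emit APIC frames between the global text metadata and the final size fixup; ff_id3v2_write_simple() keeps the old one-shot behaviour. A hypothetical muxer-side sequence using the split API (sketch, not taken from a particular muxer):

#include "avformat.h"
#include "id3v2.h"

/* Hypothetical helper showing the call order with the new split API;
 * pics/n_pics stand for whatever picture packets the muxer has at hand. */
static int write_id3v2_with_pictures(AVFormatContext *s, AVPacket *pics, int n_pics)
{
    ID3v2EncContext id3 = { 0 };
    int i, ret;

    ff_id3v2_start(&id3, s->pb, 4, ID3v2_DEFAULT_MAGIC); /* header + size placeholder */
    if ((ret = ff_id3v2_write_metadata(s, &id3)) < 0)     /* text frames from s->metadata */
        return ret;
    for (i = 0; i < n_pics; i++)                          /* one APIC frame per picture */
        if ((ret = ff_id3v2_write_apic(s, &id3, &pics[i])) < 0)
            return ret;
    ff_id3v2_finish(&id3, s->pb);                         /* seek back, patch total size */
    return 0;
}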
...@@ -19,6 +19,8 @@ ...@@ -19,6 +19,8 @@
*/ */
#include <stdint.h> #include <stdint.h>
#include <string.h>
#include "libavutil/avstring.h" #include "libavutil/avstring.h"
#include "libavutil/dict.h" #include "libavutil/dict.h"
#include "libavutil/intreadwrite.h" #include "libavutil/intreadwrite.h"
...@@ -26,12 +28,12 @@ ...@@ -26,12 +28,12 @@
#include "avio.h" #include "avio.h"
#include "id3v2.h" #include "id3v2.h"
static void id3v2_put_size(AVFormatContext *s, int size) static void id3v2_put_size(AVIOContext *pb, int size)
{ {
avio_w8(s->pb, size >> 21 & 0x7f); avio_w8(pb, size >> 21 & 0x7f);
avio_w8(s->pb, size >> 14 & 0x7f); avio_w8(pb, size >> 14 & 0x7f);
avio_w8(s->pb, size >> 7 & 0x7f); avio_w8(pb, size >> 7 & 0x7f);
avio_w8(s->pb, size & 0x7f); avio_w8(pb, size & 0x7f);
} }
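id3v2_put_size() writes the usual ID3v2 "synchsafe" integer: 28 significant bits spread over four bytes with the top bit of every byte kept zero, so the size field can never be mistaken for MPEG frame sync. For example, a size of 1000 bytes encodes as 00 00 07 68. A small sketch of both directions (ID3v2.3 frame sizes, as the id3v2_put_ttag change below shows, are instead written as plain big-endian 32-bit values):

#include <stdint.h>

/* Sketch: encode/decode the 28-bit synchsafe integers used for ID3v2 tag
 * sizes and for ID3v2.4 frame sizes. */
static void put_synchsafe(uint8_t out[4], uint32_t size)
{
    out[0] = size >> 21 & 0x7f;
    out[1] = size >> 14 & 0x7f;
    out[2] = size >>  7 & 0x7f;
    out[3] = size       & 0x7f;
}

static uint32_t get_synchsafe(const uint8_t in[4])
{
    return (uint32_t)in[0] << 21 | in[1] << 14 | in[2] << 7 | in[3];
}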
static int string_is_ascii(const uint8_t *str) static int string_is_ascii(const uint8_t *str)
...@@ -40,17 +42,30 @@ static int string_is_ascii(const uint8_t *str) ...@@ -40,17 +42,30 @@ static int string_is_ascii(const uint8_t *str)
return !*str; return !*str;
} }
static void id3v2_encode_string(AVIOContext *pb, const uint8_t *str,
enum ID3v2Encoding enc)
{
int (*put)(AVIOContext*, const char*);
if (enc == ID3v2_ENCODING_UTF16BOM) {
avio_wl16(pb, 0xFEFF); /* BOM */
put = avio_put_str16le;
} else
put = avio_put_str;
put(pb, str);
}
/** /**
* Write a text frame with one (normal frames) or two (TXXX frames) strings * Write a text frame with one (normal frames) or two (TXXX frames) strings
* according to encoding (only UTF-8 or UTF-16+BOM supported). * according to encoding (only UTF-8 or UTF-16+BOM supported).
* @return number of bytes written or a negative error code. * @return number of bytes written or a negative error code.
*/ */
static int id3v2_put_ttag(AVFormatContext *s, const char *str1, const char *str2, static int id3v2_put_ttag(ID3v2EncContext *id3, AVIOContext *avioc, const char *str1, const char *str2,
uint32_t tag, enum ID3v2Encoding enc) uint32_t tag, enum ID3v2Encoding enc)
{ {
int len; int len;
uint8_t *pb; uint8_t *pb;
int (*put)(AVIOContext*, const char*);
AVIOContext *dyn_buf; AVIOContext *dyn_buf;
if (avio_open_dyn_buf(&dyn_buf) < 0) if (avio_open_dyn_buf(&dyn_buf) < 0)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
...@@ -62,28 +77,26 @@ static int id3v2_put_ttag(AVFormatContext *s, const char *str1, const char *str2 ...@@ -62,28 +77,26 @@ static int id3v2_put_ttag(AVFormatContext *s, const char *str1, const char *str2
enc = ID3v2_ENCODING_ISO8859; enc = ID3v2_ENCODING_ISO8859;
avio_w8(dyn_buf, enc); avio_w8(dyn_buf, enc);
if (enc == ID3v2_ENCODING_UTF16BOM) { id3v2_encode_string(dyn_buf, str1, enc);
avio_wl16(dyn_buf, 0xFEFF); /* BOM */
put = avio_put_str16le;
} else
put = avio_put_str;
put(dyn_buf, str1);
if (str2) if (str2)
put(dyn_buf, str2); id3v2_encode_string(dyn_buf, str2, enc);
len = avio_close_dyn_buf(dyn_buf, &pb); len = avio_close_dyn_buf(dyn_buf, &pb);
avio_wb32(s->pb, tag); avio_wb32(avioc, tag);
id3v2_put_size(s, len); /* ID3v2.3 frame size is not synchsafe */
avio_wb16(s->pb, 0); if (id3->version == 3)
avio_write(s->pb, pb, len); avio_wb32(avioc, len);
else
id3v2_put_size(avioc, len);
avio_wb16(avioc, 0);
avio_write(avioc, pb, len);
av_freep(&pb); av_freep(&pb);
return len + ID3v2_HEADER_SIZE; return len + ID3v2_HEADER_SIZE;
} }
static int id3v2_check_write_tag(AVFormatContext *s, AVDictionaryEntry *t, const char table[][4], static int id3v2_check_write_tag(ID3v2EncContext *id3, AVIOContext *pb, AVDictionaryEntry *t,
enum ID3v2Encoding enc) const char table[][4], enum ID3v2Encoding enc)
{ {
uint32_t tag; uint32_t tag;
int i; int i;
...@@ -93,7 +106,7 @@ static int id3v2_check_write_tag(AVFormatContext *s, AVDictionaryEntry *t, const ...@@ -93,7 +106,7 @@ static int id3v2_check_write_tag(AVFormatContext *s, AVDictionaryEntry *t, const
tag = AV_RB32(t->key); tag = AV_RB32(t->key);
for (i = 0; *table[i]; i++) for (i = 0; *table[i]; i++)
if (tag == AV_RB32(table[i])) if (tag == AV_RB32(table[i]))
return id3v2_put_ttag(s, t->value, NULL, tag, enc); return id3v2_put_ttag(id3, pb, t->value, NULL, tag, enc);
return -1; return -1;
} }
...@@ -135,52 +148,137 @@ static void id3v2_3_metadata_split_date(AVDictionary **pm) ...@@ -135,52 +148,137 @@ static void id3v2_3_metadata_split_date(AVDictionary **pm)
*pm = dst; *pm = dst;
} }
int ff_id3v2_write(struct AVFormatContext *s, int id3v2_version, void ff_id3v2_start(ID3v2EncContext *id3, AVIOContext *pb, int id3v2_version,
const char *magic) const char *magic)
{ {
int64_t size_pos, cur_pos; id3->version = id3v2_version;
AVDictionaryEntry *t = NULL;
int totlen = 0, enc = id3v2_version == 3 ? ID3v2_ENCODING_UTF16BOM : avio_wb32(pb, MKBETAG(magic[0], magic[1], magic[2], id3v2_version));
ID3v2_ENCODING_UTF8; avio_w8(pb, 0);
avio_w8(pb, 0); /* flags */
avio_wb32(s->pb, MKBETAG(magic[0], magic[1], magic[2], id3v2_version));
avio_w8(s->pb, 0);
avio_w8(s->pb, 0); /* flags */
/* reserve space for size */ /* reserve space for size */
size_pos = avio_tell(s->pb); id3->size_pos = avio_tell(pb);
avio_wb32(s->pb, 0); avio_wb32(pb, 0);
}
int ff_id3v2_write_metadata(AVFormatContext *s, ID3v2EncContext *id3)
{
AVDictionaryEntry *t = NULL;
int enc = id3->version == 3 ? ID3v2_ENCODING_UTF16BOM :
ID3v2_ENCODING_UTF8;
ff_metadata_conv(&s->metadata, ff_id3v2_34_metadata_conv, NULL); ff_metadata_conv(&s->metadata, ff_id3v2_34_metadata_conv, NULL);
if (id3v2_version == 3) if (id3->version == 3)
id3v2_3_metadata_split_date(&s->metadata); id3v2_3_metadata_split_date(&s->metadata);
else if (id3v2_version == 4) else if (id3->version == 4)
ff_metadata_conv(&s->metadata, ff_id3v2_4_metadata_conv, NULL); ff_metadata_conv(&s->metadata, ff_id3v2_4_metadata_conv, NULL);
while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX))) { while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX))) {
int ret; int ret;
if ((ret = id3v2_check_write_tag(s, t, ff_id3v2_tags, enc)) > 0) { if ((ret = id3v2_check_write_tag(id3, s->pb, t, ff_id3v2_tags, enc)) > 0) {
totlen += ret; id3->len += ret;
continue; continue;
} }
if ((ret = id3v2_check_write_tag(s, t, id3v2_version == 3 ? if ((ret = id3v2_check_write_tag(id3, s->pb, t, id3->version == 3 ?
ff_id3v2_3_tags : ff_id3v2_4_tags, enc)) > 0) { ff_id3v2_3_tags : ff_id3v2_4_tags, enc)) > 0) {
totlen += ret; id3->len += ret;
continue; continue;
} }
/* unknown tag, write as TXXX frame */ /* unknown tag, write as TXXX frame */
if ((ret = id3v2_put_ttag(s, t->key, t->value, MKBETAG('T', 'X', 'X', 'X'), enc)) < 0) if ((ret = id3v2_put_ttag(id3, s->pb, t->key, t->value, MKBETAG('T', 'X', 'X', 'X'), enc)) < 0)
return ret; return ret;
totlen += ret; id3->len += ret;
} }
cur_pos = avio_tell(s->pb); return 0;
avio_seek(s->pb, size_pos, SEEK_SET); }
id3v2_put_size(s, totlen);
avio_seek(s->pb, cur_pos, SEEK_SET); int ff_id3v2_write_apic(AVFormatContext *s, ID3v2EncContext *id3, AVPacket *pkt)
{
AVStream *st = s->streams[pkt->stream_index];
AVDictionaryEntry *e;
AVIOContext *dyn_buf;
uint8_t *buf;
const CodecMime *mime = ff_id3v2_mime_tags;
const char *mimetype = NULL, *desc = "";
int enc = id3->version == 3 ? ID3v2_ENCODING_UTF16BOM :
ID3v2_ENCODING_UTF8;
int i, len, type = 0;
/* get the mimetype*/
while (mime->id != CODEC_ID_NONE) {
if (mime->id == st->codec->codec_id) {
mimetype = mime->str;
break;
}
mime++;
}
if (!mimetype) {
av_log(s, AV_LOG_ERROR, "No mimetype is known for stream %d, cannot "
"write an attached picture.\n", st->index);
return AVERROR(EINVAL);
}
/* get the picture type */
e = av_dict_get(st->metadata, "comment", NULL, 0);
for (i = 0; e && i < FF_ARRAY_ELEMS(ff_id3v2_picture_types); i++) {
if (strstr(ff_id3v2_picture_types[i], e->value) == ff_id3v2_picture_types[i]) {
type = i;
break;
}
}
/* get the description */
if ((e = av_dict_get(st->metadata, "title", NULL, 0)))
desc = e->value;
/* start writing */
if (avio_open_dyn_buf(&dyn_buf) < 0)
return AVERROR(ENOMEM);
avio_w8(dyn_buf, enc);
avio_put_str(dyn_buf, mimetype);
avio_w8(dyn_buf, type);
id3v2_encode_string(dyn_buf, desc, enc);
avio_write(dyn_buf, pkt->data, pkt->size);
len = avio_close_dyn_buf(dyn_buf, &buf);
avio_wb32(s->pb, MKBETAG('A', 'P', 'I', 'C'));
if (id3->version == 3)
avio_wb32(s->pb, len);
else
id3v2_put_size(s->pb, len);
avio_wb16(s->pb, 0);
avio_write(s->pb, buf, len);
av_freep(&buf);
id3->len += len + ID3v2_HEADER_SIZE;
return 0;
}
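The writer mirrors the reader's conventions: the APIC picture type is found by prefix-matching the stream's "comment" metadata against ff_id3v2_picture_types, and the description comes from "title". A hypothetical helper showing how a caller would label a picture stream so that lookup succeeds:

#include "libavformat/avformat.h"
#include "libavutil/dict.h"

/* Hypothetical: label a picture stream so ff_id3v2_write_apic() above picks
 * the matching APIC type (via "comment") and description (via "title"),
 * mirroring what the ID3v2 reader stores when demuxing. */
static void label_cover_art(AVStream *st)
{
    av_dict_set(&st->metadata, "comment", "Cover (front)", 0);
    av_dict_set(&st->metadata, "title",   "Front cover",   0);
}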
void ff_id3v2_finish(ID3v2EncContext *id3, AVIOContext *pb)
{
int64_t cur_pos = avio_tell(pb);
avio_seek(pb, id3->size_pos, SEEK_SET);
id3v2_put_size(pb, id3->len);
avio_seek(pb, cur_pos, SEEK_SET);
}
int ff_id3v2_write_simple(struct AVFormatContext *s, int id3v2_version,
const char *magic)
{
ID3v2EncContext id3 = { 0 };
int ret;
ff_id3v2_start(&id3, s->pb, id3v2_version, magic);
if ((ret = ff_id3v2_write_metadata(s, &id3)) < 0)
return ret;
ff_id3v2_finish(&id3, s->pb);
return 0; return 0;
} }
...@@ -37,6 +37,11 @@ typedef struct AVCodecTag { ...@@ -37,6 +37,11 @@ typedef struct AVCodecTag {
unsigned int tag; unsigned int tag;
} AVCodecTag; } AVCodecTag;
typedef struct CodecMime{
char str[32];
enum CodecID id;
} CodecMime;
#ifdef __GNUC__ #ifdef __GNUC__
#define dynarray_add(tab, nb_ptr, elem)\ #define dynarray_add(tab, nb_ptr, elem)\
do {\ do {\
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include "libavcodec/avcodec.h" #include "libavcodec/avcodec.h"
#include "metadata.h" #include "metadata.h"
#include "internal.h"
/* EBML version supported */ /* EBML version supported */
#define EBML_VERSION 1 #define EBML_VERSION 1
...@@ -250,11 +251,6 @@ typedef struct CodecTags{ ...@@ -250,11 +251,6 @@ typedef struct CodecTags{
enum CodecID id; enum CodecID id;
}CodecTags; }CodecTags;
typedef struct CodecMime{
char str[32];
enum CodecID id;
}CodecMime;
/* max. depth in the EBML tree structure */ /* max. depth in the EBML tree structure */
#define EBML_MAX_DEPTH 16 #define EBML_MAX_DEPTH 16
......
...@@ -1223,6 +1223,7 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries) ...@@ -1223,6 +1223,7 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
if (st->codec->codec_type==AVMEDIA_TYPE_VIDEO) { if (st->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
unsigned int color_depth, len; unsigned int color_depth, len;
int color_greyscale; int color_greyscale;
int color_table_id;
st->codec->codec_id = id; st->codec->codec_id = id;
avio_rb16(pb); /* version */ avio_rb16(pb); /* version */
...@@ -1250,9 +1251,9 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries) ...@@ -1250,9 +1251,9 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
st->codec->codec_tag=MKTAG('I', '4', '2', '0'); st->codec->codec_tag=MKTAG('I', '4', '2', '0');
st->codec->bits_per_coded_sample = avio_rb16(pb); /* depth */ st->codec->bits_per_coded_sample = avio_rb16(pb); /* depth */
st->codec->color_table_id = avio_rb16(pb); /* colortable id */ color_table_id = avio_rb16(pb); /* colortable id */
av_dlog(c->fc, "depth %d, ctab id %d\n", av_dlog(c->fc, "depth %d, ctab id %d\n",
st->codec->bits_per_coded_sample, st->codec->color_table_id); st->codec->bits_per_coded_sample, color_table_id);
/* figure out the palette situation */ /* figure out the palette situation */
color_depth = st->codec->bits_per_coded_sample & 0x1F; color_depth = st->codec->bits_per_coded_sample & 0x1F;
color_greyscale = st->codec->bits_per_coded_sample & 0x20; color_greyscale = st->codec->bits_per_coded_sample & 0x20;
...@@ -1282,7 +1283,7 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries) ...@@ -1282,7 +1283,7 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
if (color_index < 0) if (color_index < 0)
color_index = 0; color_index = 0;
} }
} else if (st->codec->color_table_id) { } else if (color_table_id) {
const uint8_t *color_table; const uint8_t *color_table;
/* if flag bit 3 is set, use the default palette */ /* if flag bit 3 is set, use the default palette */
color_count = 1 << color_depth; color_count = 1 << color_depth;
......
...@@ -84,6 +84,7 @@ static int id3v1_create_tag(AVFormatContext *s, uint8_t *buf) ...@@ -84,6 +84,7 @@ static int id3v1_create_tag(AVFormatContext *s, uint8_t *buf)
typedef struct MP3Context { typedef struct MP3Context {
const AVClass *class; const AVClass *class;
ID3v2EncContext id3;
int id3v2_version; int id3v2_version;
int write_id3v1; int write_id3v1;
int64_t frames_offset; int64_t frames_offset;
...@@ -93,60 +94,15 @@ typedef struct MP3Context { ...@@ -93,60 +94,15 @@ typedef struct MP3Context {
uint32_t seen; uint32_t seen;
uint32_t pos; uint32_t pos;
uint64_t bag[VBR_NUM_BAGS]; uint64_t bag[VBR_NUM_BAGS];
} MP3Context;
static int mp2_write_trailer(struct AVFormatContext *s)
{
uint8_t buf[ID3v1_TAG_SIZE];
MP3Context *mp3 = s->priv_data;
/* write the id3v1 tag */
if (mp3 && mp3->write_id3v1 && id3v1_create_tag(s, buf) > 0) {
avio_write(s->pb, buf, ID3v1_TAG_SIZE);
}
/* write number of frames */ /* index of the audio stream */
if (mp3 && mp3->frames_offset) { int audio_stream_idx;
avio_seek(s->pb, mp3->frames_offset, SEEK_SET); /* number of attached pictures we still need to write */
avio_wb32(s->pb, s->streams[0]->nb_frames); int pics_to_write;
avio_seek(s->pb, 0, SEEK_END);
}
avio_flush(s->pb);
return 0;
}
#if CONFIG_MP2_MUXER
AVOutputFormat ff_mp2_muxer = {
.name = "mp2",
.long_name = NULL_IF_CONFIG_SMALL("MPEG audio layer 2"),
.mime_type = "audio/x-mpeg",
.extensions = "mp2,m2a",
.audio_codec = CODEC_ID_MP2,
.video_codec = CODEC_ID_NONE,
.write_packet = ff_raw_write_packet,
.write_trailer = mp2_write_trailer,
.flags = AVFMT_NOTIMESTAMPS,
};
#endif
#if CONFIG_MP3_MUXER
static const AVOption options[] = {
{ "id3v2_version", "Select ID3v2 version to write. Currently 3 and 4 are supported.",
offsetof(MP3Context, id3v2_version), AV_OPT_TYPE_INT, {.dbl = 4}, 3, 4, AV_OPT_FLAG_ENCODING_PARAM},
{ "write_id3v1", "Enable ID3v1 writing. ID3v1 tags are written in UTF-8 which may not be supported by most software.",
offsetof(MP3Context, write_id3v1), AV_OPT_TYPE_INT, {.dbl = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM},
{ NULL },
};
static const AVClass mp3_muxer_class = { /* audio packets are queued here until we get all the attached pictures */
.class_name = "MP3 muxer", AVPacketList *queue, *queue_end;
.item_name = av_default_item_name, } MP3Context;
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
static const int64_t xing_offtbl[2][2] = {{32, 17}, {17,9}}; static const int64_t xing_offtbl[2][2] = {{32, 17}, {17,9}};
...@@ -155,8 +111,8 @@ static const int64_t xing_offtbl[2][2] = {{32, 17}, {17,9}}; ...@@ -155,8 +111,8 @@ static const int64_t xing_offtbl[2][2] = {{32, 17}, {17,9}};
*/ */
static int mp3_write_xing(AVFormatContext *s) static int mp3_write_xing(AVFormatContext *s)
{ {
AVCodecContext *codec = s->streams[0]->codec;
MP3Context *mp3 = s->priv_data; MP3Context *mp3 = s->priv_data;
AVCodecContext *codec = s->streams[mp3->audio_stream_idx]->codec;
int bitrate_idx; int bitrate_idx;
int best_bitrate_idx = -1; int best_bitrate_idx = -1;
int best_bitrate_error= INT_MAX; int best_bitrate_error= INT_MAX;
...@@ -166,6 +122,9 @@ static int mp3_write_xing(AVFormatContext *s) ...@@ -166,6 +122,9 @@ static int mp3_write_xing(AVFormatContext *s)
int srate_idx, i, channels; int srate_idx, i, channels;
int needed; int needed;
if (!s->pb->seekable)
return 0;
for (i = 0; i < FF_ARRAY_ELEMS(avpriv_mpa_freq_tab); i++) for (i = 0; i < FF_ARRAY_ELEMS(avpriv_mpa_freq_tab); i++)
if (avpriv_mpa_freq_tab[i] == codec->sample_rate) { if (avpriv_mpa_freq_tab[i] == codec->sample_rate) {
srate_idx = i; srate_idx = i;
...@@ -295,26 +254,7 @@ static void mp3_fix_xing(AVFormatContext *s) ...@@ -295,26 +254,7 @@ static void mp3_fix_xing(AVFormatContext *s)
avio_seek(s->pb, 0, SEEK_END); avio_seek(s->pb, 0, SEEK_END);
} }
/** static int mp3_write_packet_internal(AVFormatContext *s, AVPacket *pkt)
* Write an ID3v2 header at beginning of stream
*/
static int mp3_write_header(struct AVFormatContext *s)
{
MP3Context *mp3 = s->priv_data;
int ret;
ret = ff_id3v2_write(s, mp3->id3v2_version, ID3v2_DEFAULT_MAGIC);
if (ret < 0)
return ret;
if (s->pb->seekable)
mp3_write_xing(s);
return 0;
}
static int mp3_write_packet(AVFormatContext *s, AVPacket *pkt)
{ {
if (! pkt || ! pkt->data || pkt->size < 4) if (! pkt || ! pkt->data || pkt->size < 4)
return ff_raw_write_packet(s, pkt); return ff_raw_write_packet(s, pkt);
...@@ -350,6 +290,175 @@ static int mp3_write_packet(AVFormatContext *s, AVPacket *pkt) ...@@ -350,6 +290,175 @@ static int mp3_write_packet(AVFormatContext *s, AVPacket *pkt)
} }
} }
static int mp3_queue_flush(AVFormatContext *s)
{
MP3Context *mp3 = s->priv_data;
AVPacketList *pktl;
int ret = 0, write = 1;
ff_id3v2_finish(&mp3->id3, s->pb);
mp3_write_xing(s);
while ((pktl = mp3->queue)) {
if (write && (ret = mp3_write_packet_internal(s, &pktl->pkt)) < 0)
write = 0;
av_free_packet(&pktl->pkt);
mp3->queue = pktl->next;
av_freep(&pktl);
}
mp3->queue_end = NULL;
return ret;
}
static int mp2_write_trailer(struct AVFormatContext *s)
{
uint8_t buf[ID3v1_TAG_SIZE];
MP3Context *mp3 = s->priv_data;
if (mp3 && mp3->pics_to_write) {
av_log(s, AV_LOG_WARNING, "No packets were sent for some of the "
"attached pictures.\n");
mp3_queue_flush(s);
}
/* write the id3v1 tag */
if (mp3 && mp3->write_id3v1 && id3v1_create_tag(s, buf) > 0) {
avio_write(s->pb, buf, ID3v1_TAG_SIZE);
}
/* write number of frames */
if (mp3 && mp3->frames_offset) {
avio_seek(s->pb, mp3->frames_offset, SEEK_SET);
avio_wb32(s->pb, s->streams[mp3->audio_stream_idx]->nb_frames);
avio_seek(s->pb, 0, SEEK_END);
}
avio_flush(s->pb);
return 0;
}
#if CONFIG_MP2_MUXER
AVOutputFormat ff_mp2_muxer = {
.name = "mp2",
.long_name = NULL_IF_CONFIG_SMALL("MPEG audio layer 2"),
.mime_type = "audio/x-mpeg",
.extensions = "mp2,m2a",
.audio_codec = CODEC_ID_MP2,
.video_codec = CODEC_ID_NONE,
.write_packet = ff_raw_write_packet,
.write_trailer = mp2_write_trailer,
.flags = AVFMT_NOTIMESTAMPS,
};
#endif
#if CONFIG_MP3_MUXER
static const AVOption options[] = {
{ "id3v2_version", "Select ID3v2 version to write. Currently 3 and 4 are supported.",
offsetof(MP3Context, id3v2_version), AV_OPT_TYPE_INT, {.dbl = 4}, 3, 4, AV_OPT_FLAG_ENCODING_PARAM},
{ "write_id3v1", "Enable ID3v1 writing. ID3v1 tags are written in UTF-8 which may not be supported by most software.",
offsetof(MP3Context, write_id3v1), AV_OPT_TYPE_INT, {.dbl = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM},
{ NULL },
};
static const AVClass mp3_muxer_class = {
.class_name = "MP3 muxer",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
static int mp3_write_packet(AVFormatContext *s, AVPacket *pkt)
{
MP3Context *mp3 = s->priv_data;
if (pkt->stream_index == mp3->audio_stream_idx) {
if (mp3->pics_to_write) {
/* buffer audio packets until we get all the pictures */
AVPacketList *pktl = av_mallocz(sizeof(*pktl));
if (!pktl)
return AVERROR(ENOMEM);
pktl->pkt = *pkt;
pkt->destruct = NULL;
if (mp3->queue_end)
mp3->queue_end->next = pktl;
else
mp3->queue = pktl;
mp3->queue_end = pktl;
} else
return mp3_write_packet_internal(s, pkt);
} else {
int ret;
/* warn only once for each stream */
if (s->streams[pkt->stream_index]->nb_frames == 1) {
av_log(s, AV_LOG_WARNING, "Got more than one picture in stream %d,"
" ignoring.\n", pkt->stream_index);
}
if (!mp3->pics_to_write || s->streams[pkt->stream_index]->nb_frames >= 1)
return 0;
if ((ret = ff_id3v2_write_apic(s, &mp3->id3, pkt)) < 0)
return ret;
mp3->pics_to_write--;
/* flush the buffered audio packets */
if (!mp3->pics_to_write &&
(ret = mp3_queue_flush(s)) < 0)
return ret;
}
return 0;
}
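While pictures are still outstanding, mp3_write_packet() parks incoming audio on a packet list and takes over the payload by clearing destruct on the caller's packet, so the caller's subsequent av_free_packet() becomes a no-op. The queueing step in isolation (sketch):

#include "libavformat/avformat.h"
#include "libavutil/mem.h"

/* Sketch of the queueing used above: the list node takes ownership of the
 * packet data via a shallow copy. */
static int queue_packet(AVPacketList **head, AVPacketList **tail, AVPacket *pkt)
{
    AVPacketList *pktl = av_mallocz(sizeof(*pktl));
    if (!pktl)
        return AVERROR(ENOMEM);
    pktl->pkt     = *pkt;     /* shallow copy: node now owns pkt->data          */
    pkt->destruct = NULL;     /* caller's av_free_packet() won't touch the data */
    if (*tail)
        (*tail)->next = pktl;
    else
        *head = pktl;
    *tail = pktl;
    return 0;
}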
/**
* Write an ID3v2 header at beginning of stream
*/
static int mp3_write_header(struct AVFormatContext *s)
{
MP3Context *mp3 = s->priv_data;
int ret, i;
/* check the streams -- we want exactly one audio and arbitrary number of
* video (attached pictures) */
mp3->audio_stream_idx = -1;
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
if (mp3->audio_stream_idx >= 0 || st->codec->codec_id != CODEC_ID_MP3) {
av_log(s, AV_LOG_ERROR, "Invalid audio stream. Exactly one MP3 "
"audio stream is required.\n");
return AVERROR(EINVAL);
}
mp3->audio_stream_idx = i;
} else if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO) {
av_log(s, AV_LOG_ERROR, "Only audio streams and pictures are allowed in MP3.\n");
return AVERROR(EINVAL);
}
}
if (mp3->audio_stream_idx < 0) {
av_log(s, AV_LOG_ERROR, "No audio stream present.\n");
return AVERROR(EINVAL);
}
mp3->pics_to_write = s->nb_streams - 1;
ff_id3v2_start(&mp3->id3, s->pb, mp3->id3v2_version, ID3v2_DEFAULT_MAGIC);
ret = ff_id3v2_write_metadata(s, &mp3->id3);
if (ret < 0)
return ret;
if (!mp3->pics_to_write) {
ff_id3v2_finish(&mp3->id3, s->pb);
mp3_write_xing(s);
}
return 0;
}
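Taken together, header, packet and flush form a small state machine: the ID3v2 tag is opened in write_header but stays open while pics_to_write is non-zero, audio arriving in that window is queued, each picture packet becomes an APIC frame, and once the last picture is in, the tag is finalized, the Xing header is written and the queue is flushed. A hypothetical application-side driver that exercises this behaviour (function and packet-array names are illustrative):

#include "libavformat/avformat.h"

/* Hypothetical: pictures and audio may be sent in any order; the muxer
 * buffers audio until every attached picture announced at write_header
 * time (one per extra video stream) has been written. */
static int mux_audio_and_cover(AVFormatContext *oc,
                               AVPacket *cover, AVPacket *audio, int n_audio)
{
    int i, ret;
    if ((ret = avformat_write_header(oc, NULL)) < 0)
        return ret;
    for (i = 0; i < n_audio; i++)
        if ((ret = av_write_frame(oc, &audio[i])) < 0)   /* queued until the cover arrives */
            return ret;
    if ((ret = av_write_frame(oc, cover)) < 0)           /* APIC written, queue flushed */
        return ret;
    return av_write_trailer(oc);                         /* ID3v1 tag, frame-count fixup */
}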
static int mp3_write_trailer(AVFormatContext *s) static int mp3_write_trailer(AVFormatContext *s)
{ {
MP3Context *mp3 = s->priv_data; MP3Context *mp3 = s->priv_data;
...@@ -371,7 +480,7 @@ AVOutputFormat ff_mp3_muxer = { ...@@ -371,7 +480,7 @@ AVOutputFormat ff_mp3_muxer = {
.extensions = "mp3", .extensions = "mp3",
.priv_data_size = sizeof(MP3Context), .priv_data_size = sizeof(MP3Context),
.audio_codec = CODEC_ID_MP3, .audio_codec = CODEC_ID_MP3,
.video_codec = CODEC_ID_NONE, .video_codec = CODEC_ID_PNG,
.write_header = mp3_write_header, .write_header = mp3_write_header,
.write_packet = mp3_write_packet, .write_packet = mp3_write_packet,
.write_trailer = mp3_write_trailer, .write_trailer = mp3_write_trailer,
......
...@@ -270,7 +270,7 @@ static int oma_read_header(AVFormatContext *s) ...@@ -270,7 +270,7 @@ static int oma_read_header(AVFormatContext *s)
ID3v2ExtraMeta *extra_meta = NULL; ID3v2ExtraMeta *extra_meta = NULL;
OMAContext *oc = s->priv_data; OMAContext *oc = s->priv_data;
ff_id3v2_read_all(s, ID3v2_EA3_MAGIC, &extra_meta); ff_id3v2_read(s, ID3v2_EA3_MAGIC, &extra_meta);
ret = avio_read(s->pb, buf, EA3_HEADER_SIZE); ret = avio_read(s->pb, buf, EA3_HEADER_SIZE);
if (ret < EA3_HEADER_SIZE) if (ret < EA3_HEADER_SIZE)
return -1; return -1;
......
...@@ -49,7 +49,7 @@ static av_cold int oma_write_header(AVFormatContext *s) ...@@ -49,7 +49,7 @@ static av_cold int oma_write_header(AVFormatContext *s)
} }
/* Metadata; OpenMG does not support ID3v2.4 */ /* Metadata; OpenMG does not support ID3v2.4 */
ff_id3v2_write(s, 3, ID3v2_EA3_MAGIC); ff_id3v2_write_simple(s, 3, ID3v2_EA3_MAGIC);
ffio_wfourcc(s->pb, "EA3\0"); ffio_wfourcc(s->pb, "EA3\0");
avio_w8(s->pb, EA3_HEADER_SIZE >> 7); avio_w8(s->pb, EA3_HEADER_SIZE >> 7);
......
...@@ -299,4 +299,5 @@ AVInputFormat ff_str_demuxer = { ...@@ -299,4 +299,5 @@ AVInputFormat ff_str_demuxer = {
.read_header = str_read_header, .read_header = str_read_header,
.read_packet = str_read_packet, .read_packet = str_read_packet,
.read_close = str_read_close, .read_close = str_read_close,
.flags = AVFMT_NO_BYTE_SEEK,
}; };
...@@ -112,7 +112,7 @@ static int rtp_write_header(AVFormatContext *s1) ...@@ -112,7 +112,7 @@ static int rtp_write_header(AVFormatContext *s1)
if (s->max_packet_size) { if (s->max_packet_size) {
if (s1->pb->max_packet_size) if (s1->pb->max_packet_size)
s->max_packet_size = FFMIN(s->max_payload_size, s->max_packet_size = FFMIN(s->max_packet_size,
s1->pb->max_packet_size); s1->pb->max_packet_size);
} else } else
s->max_packet_size = s1->pb->max_packet_size; s->max_packet_size = s1->pb->max_packet_size;
......
...@@ -113,7 +113,6 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt) ...@@ -113,7 +113,6 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
vst->codec->codec_type = AVMEDIA_TYPE_VIDEO; vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
vst->codec->codec_id = ff_codec_get_id(swf_codec_tags, avio_r8(pb)); vst->codec->codec_id = ff_codec_get_id(swf_codec_tags, avio_r8(pb));
avpriv_set_pts_info(vst, 16, 256, swf->frame_rate); avpriv_set_pts_info(vst, 16, 256, swf->frame_rate);
vst->codec->time_base = (AVRational){ 256, swf->frame_rate };
len -= 8; len -= 8;
} else if (tag == TAG_STREAMHEAD || tag == TAG_STREAMHEAD2) { } else if (tag == TAG_STREAMHEAD || tag == TAG_STREAMHEAD2) {
/* streaming found */ /* streaming found */
...@@ -186,7 +185,6 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt) ...@@ -186,7 +185,6 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
vst->codec->codec_type = AVMEDIA_TYPE_VIDEO; vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
vst->codec->codec_id = CODEC_ID_MJPEG; vst->codec->codec_id = CODEC_ID_MJPEG;
avpriv_set_pts_info(vst, 64, 256, swf->frame_rate); avpriv_set_pts_info(vst, 64, 256, swf->frame_rate);
vst->codec->time_base = (AVRational){ 256, swf->frame_rate };
st = vst; st = vst;
} }
avio_rl16(pb); /* BITMAP_ID */ avio_rl16(pb); /* BITMAP_ID */
......
...@@ -545,11 +545,29 @@ static int init_input(AVFormatContext *s, const char *filename, AVDictionary **o ...@@ -545,11 +545,29 @@ static int init_input(AVFormatContext *s, const char *filename, AVDictionary **o
return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0); return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
} }
static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
AVPacketList **plast_pktl){
AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
if (!pktl)
return NULL;
if (*packet_buffer)
(*plast_pktl)->next = pktl;
else
*packet_buffer = pktl;
/* add the packet in the buffered packet list */
*plast_pktl = pktl;
pktl->pkt= *pkt;
return &pktl->pkt;
}
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options) int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
{ {
AVFormatContext *s = *ps; AVFormatContext *s = *ps;
int ret = 0; int i, ret = 0;
AVDictionary *tmp = NULL; AVDictionary *tmp = NULL;
ID3v2ExtraMeta *id3v2_extra_meta = NULL;
if (!s && !(s = avformat_alloc_context())) if (!s && !(s = avformat_alloc_context()))
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
...@@ -592,12 +610,25 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma ...@@ -592,12 +610,25 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma
/* e.g. AVFMT_NOFILE formats will not have a AVIOContext */ /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
if (s->pb) if (s->pb)
ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC); ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header) if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
if ((ret = s->iformat->read_header(s)) < 0) if ((ret = s->iformat->read_header(s)) < 0)
goto fail; goto fail;
if (id3v2_extra_meta &&
(ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
goto fail;
ff_id3v2_free_extra_meta(&id3v2_extra_meta);
/* queue attached pictures */
for (i = 0; i < s->nb_streams; i++)
if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC) {
AVPacket copy = s->streams[i]->attached_pic;
copy.destruct = NULL;
add_to_pktbuf(&s->raw_packet_buffer, &copy, &s->raw_packet_buffer_end);
}
if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset) if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
s->data_offset = avio_tell(s->pb); s->data_offset = avio_tell(s->pb);
...@@ -611,6 +642,7 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma ...@@ -611,6 +642,7 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma
return 0; return 0;
fail: fail:
ff_id3v2_free_extra_meta(&id3v2_extra_meta);
av_dict_free(&tmp); av_dict_free(&tmp);
if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO)) if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
avio_close(s->pb); avio_close(s->pb);
...@@ -621,23 +653,6 @@ fail: ...@@ -621,23 +653,6 @@ fail:
/*******************************************************/ /*******************************************************/
static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
AVPacketList **plast_pktl){
AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
if (!pktl)
return NULL;
if (*packet_buffer)
(*plast_pktl)->next = pktl;
else
*packet_buffer = pktl;
/* add the packet in the buffered packet list */
*plast_pktl = pktl;
pktl->pkt= *pkt;
return &pktl->pkt;
}
int av_read_packet(AVFormatContext *s, AVPacket *pkt) int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{ {
int ret, i; int ret, i;
...@@ -2722,6 +2737,8 @@ void avformat_free_context(AVFormatContext *s) ...@@ -2722,6 +2737,8 @@ void avformat_free_context(AVFormatContext *s)
av_parser_close(st->parser); av_parser_close(st->parser);
av_free_packet(&st->cur_pkt); av_free_packet(&st->cur_pkt);
} }
if (st->attached_pic.data)
av_free_packet(&st->attached_pic);
av_dict_free(&st->metadata); av_dict_free(&st->metadata);
av_freep(&st->index_entries); av_freep(&st->index_entries);
av_freep(&st->codec->extradata); av_freep(&st->codec->extradata);
......
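avformat_open_input() now parses the collected ID3v2 extra metadata, turns each APIC into a stream via ff_id3v2_parse_apic(), and queues a copy of every attached_pic into the raw packet buffer, which is why the avformat.h comment above promises the picture among the first few packets read. The queued copy deliberately has destruct set to NULL: the AVStream keeps ownership, and freeing the copy after av_read_frame() must not release the stream's buffer. The non-owning copy in isolation (sketch):

#include "libavformat/avformat.h"

/* Sketch of the non-owning copy queued above: the original stays owned by
 * the AVStream; "freeing" the copy leaves the shared data untouched. */
static AVPacket make_non_owning_copy(const AVPacket *src)
{
    AVPacket copy = *src;   /* shares the data pointer and metadata */
    copy.destruct = NULL;   /* av_free_packet() on the copy won't free data */
    return copy;
}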
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
#include "libavutil/avutil.h" #include "libavutil/avutil.h"
#define LIBAVFORMAT_VERSION_MAJOR 54 #define LIBAVFORMAT_VERSION_MAJOR 54
#define LIBAVFORMAT_VERSION_MINOR 1 #define LIBAVFORMAT_VERSION_MINOR 2
#define LIBAVFORMAT_VERSION_MICRO 100 #define LIBAVFORMAT_VERSION_MICRO 100
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
......
...@@ -220,12 +220,12 @@ static int vqf_read_packet(AVFormatContext *s, AVPacket *pkt) ...@@ -220,12 +220,12 @@ static int vqf_read_packet(AVFormatContext *s, AVPacket *pkt)
int ret; int ret;
int size = (c->frame_bit_len - c->remaining_bits + 7)>>3; int size = (c->frame_bit_len - c->remaining_bits + 7)>>3;
pkt->pos = avio_tell(s->pb);
pkt->stream_index = 0;
if (av_new_packet(pkt, size+2) < 0) if (av_new_packet(pkt, size+2) < 0)
return AVERROR(EIO); return AVERROR(EIO);
pkt->pos = avio_tell(s->pb);
pkt->stream_index = 0;
pkt->data[0] = 8 - c->remaining_bits; // Number of bits to skip pkt->data[0] = 8 - c->remaining_bits; // Number of bits to skip
pkt->data[1] = c->last_frame_bits; pkt->data[1] = c->last_frame_bits;
ret = avio_read(s->pb, pkt->data+2, size); ret = avio_read(s->pb, pkt->data+2, size);
......
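The VQF fix is an ordering issue: av_new_packet() begins by reinitializing the packet, so pos and stream_index written before the call are silently reset to their defaults. Setting them after allocation, as the hunk does, is the safe pattern (sketch):

#include "libavformat/avformat.h"

/* Sketch: fields set on an AVPacket before av_new_packet() are lost,
 * because av_new_packet() starts by av_init_packet()-ing the structure.
 * Returns bytes read or a negative error code. */
static int read_one_packet(AVFormatContext *s, AVPacket *pkt, int size)
{
    int ret = av_new_packet(pkt, size);
    if (ret < 0)
        return ret;
    pkt->pos          = avio_tell(s->pb);  /* set *after* allocation */
    pkt->stream_index = 0;
    return avio_read(s->pb, pkt->data, size);
}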
...@@ -1046,7 +1046,7 @@ int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter) ...@@ -1046,7 +1046,7 @@ int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter)
c->vLumBufSize= c->vLumFilterSize; c->vLumBufSize= c->vLumFilterSize;
c->vChrBufSize= c->vChrFilterSize; c->vChrBufSize= c->vChrFilterSize;
for (i=0; i<dstH; i++) { for (i=0; i<dstH; i++) {
int chrI= (int64_t)i*c->chrDstH / dstH; int chrI = (int64_t) i * c->chrDstH / dstH;
int nextSlice= FFMAX(c->vLumFilterPos[i ] + c->vLumFilterSize - 1, int nextSlice= FFMAX(c->vLumFilterPos[i ] + c->vLumFilterSize - 1,
((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<c->chrSrcVSubSample)); ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<c->chrSrcVSubSample));
......
...@@ -38,13 +38,13 @@ fi ...@@ -38,13 +38,13 @@ fi
if [ -n "$do_mpeg2_ivlc_qprd" ]; then if [ -n "$do_mpeg2_ivlc_qprd" ]; then
# mpeg2 encoding intra vlc qprd # mpeg2 encoding intra vlc qprd
do_video_encoding mpeg2ivlc-qprd.mpg "-vb 500k -bf 2 -trellis 1 -flags +qprd+mv0 -intra_vlc 1 -cmp 2 -subcmp 2 -mbd rd -vcodec mpeg2video -f mpeg2video" do_video_encoding mpeg2ivlc-qprd.mpg "-vb 500k -bf 2 -trellis 1 -flags +mv0 -mpv_flags +qp_rd -intra_vlc 1 -cmp 2 -subcmp 2 -mbd rd -vcodec mpeg2video -f mpeg2video"
do_video_decoding do_video_decoding
fi fi
if [ -n "$do_mpeg2_422" ]; then if [ -n "$do_mpeg2_422" ]; then
#mpeg2 4:2:2 encoding #mpeg2 4:2:2 encoding
do_video_encoding mpeg2_422.mpg "-vb 1000k -bf 2 -trellis 1 -flags +qprd+mv0+ildct+ilme -intra_vlc 1 -mbd rd -vcodec mpeg2video -pix_fmt yuv422p -f mpeg2video" do_video_encoding mpeg2_422.mpg "-vb 1000k -bf 2 -trellis 1 -flags +mv0+ildct+ilme -mpv_flags +qp_rd -intra_vlc 1 -mbd rd -vcodec mpeg2video -pix_fmt yuv422p -f mpeg2video"
do_video_decoding do_video_decoding
fi fi
...@@ -143,7 +143,7 @@ do_video_decoding ...@@ -143,7 +143,7 @@ do_video_decoding
fi fi
if [ -n "$do_mpeg4_qprd" ]; then if [ -n "$do_mpeg4_qprd" ]; then
do_video_encoding mpeg4-qprd.avi "-b 450k -bf 2 -trellis 1 -flags +mv4+qprd+mv0 -cmp 2 -subcmp 2 -mbd rd -an -vcodec mpeg4" do_video_encoding mpeg4-qprd.avi "-b 450k -bf 2 -trellis 1 -flags +mv4+mv0 -mpv_flags +qp_rd -cmp 2 -subcmp 2 -mbd rd -an -vcodec mpeg4"
do_video_decoding do_video_decoding
fi fi
......
...@@ -10,5 +10,8 @@ fate-cdxl-pal8: CMD = framecrc -i $(SAMPLES)/cdxl/maku.cdxl -pix_fmt rgb24 -fram ...@@ -10,5 +10,8 @@ fate-cdxl-pal8: CMD = framecrc -i $(SAMPLES)/cdxl/maku.cdxl -pix_fmt rgb24 -fram
FATE_CDXL += fate-cdxl-pal8-small FATE_CDXL += fate-cdxl-pal8-small
fate-cdxl-pal8-small: CMD = framecrc -i $(SAMPLES)/cdxl/fruit.cdxl -an -pix_fmt rgb24 -frames:v 46 fate-cdxl-pal8-small: CMD = framecrc -i $(SAMPLES)/cdxl/fruit.cdxl -an -pix_fmt rgb24 -frames:v 46
FATE_CDXL += fate-cdxl-bitline-ham6
fate-cdxl-bitline-ham6: CMD = framecrc -i $(SAMPLES)/cdxl/bitline.cdxl -frames:v 10
FATE_TESTS += $(FATE_CDXL) FATE_TESTS += $(FATE_CDXL)
fate-cdxl: $(FATE_CDXL) fate-cdxl: $(FATE_CDXL)
...@@ -33,6 +33,11 @@ fate-mp3-float-conf-si_block: CMD = pcm -acodec mp3float -i $(SAMPLES)/mp3-confo ...@@ -33,6 +33,11 @@ fate-mp3-float-conf-si_block: CMD = pcm -acodec mp3float -i $(SAMPLES)/mp3-confo
fate-mp3-float-conf-si_block: CMP = stddev fate-mp3-float-conf-si_block: CMP = stddev
fate-mp3-float-conf-si_block: REF = $(SAMPLES)/mp3-conformance/si_block.pcm fate-mp3-float-conf-si_block: REF = $(SAMPLES)/mp3-conformance/si_block.pcm
FATE_MP3 += fate-mp3-float-extra_overread
fate-mp3-float-extra_overread: CMD = pcm -c:a mp3float -i $(SAMPLES)/mpegaudio/extra_overread.mp3
fate-mp3-float-extra_overread: CMP = stddev
fate-mp3-float-extra_overread: REF = $(SAMPLES)/mpegaudio/extra_overread.pcm
FATE_TESTS += $(FATE_MP3) FATE_TESTS += $(FATE_MP3)
fate-mp3: $(FATE_MP3) fate-mp3: $(FATE_MP3)
$(FATE_MP3): CMP = stddev $(FATE_MP3): CMP = stddev
......
...@@ -143,7 +143,7 @@ FATE_VIDEO += fate-mpeg2-field-enc ...@@ -143,7 +143,7 @@ FATE_VIDEO += fate-mpeg2-field-enc
fate-mpeg2-field-enc: CMD = framecrc -flags +bitexact -dct fastint -idct simple -i $(SAMPLES)/mpeg2/mpeg2_field_encoding.ts -an fate-mpeg2-field-enc: CMD = framecrc -flags +bitexact -dct fastint -idct simple -i $(SAMPLES)/mpeg2/mpeg2_field_encoding.ts -an
FATE_VIDEO += fate-nuv FATE_VIDEO += fate-nuv
fate-nuv: CMD = framecrc -idct simple -i $(SAMPLES)/nuv/Today.nuv fate-nuv: CMD = framecrc -idct simple -i $(SAMPLES)/nuv/Today.nuv -an
FATE_VIDEO += fate-qpeg FATE_VIDEO += fate-qpeg
fate-qpeg: CMD = framecrc -i $(SAMPLES)/qpeg/Clock.avi -an -pix_fmt rgb24 fate-qpeg: CMD = framecrc -i $(SAMPLES)/qpeg/Clock.avi -an -pix_fmt rgb24
......
#tb 0: 12/601
0, 0, 0, 1, 63180, 0xcda82c16
0, 1, 1, 1, 63180, 0xa6097bf9
0, 2, 2, 1, 63180, 0x4c2fb091
0, 3, 3, 1, 63180, 0xc597db00
0, 4, 4, 1, 63180, 0xfa581ccd
0, 5, 5, 1, 63180, 0x3e51498f
0, 6, 6, 1, 63180, 0xe3495396
0, 7, 7, 1, 63180, 0x425f5f02
0, 8, 8, 1, 63180, 0x6077465f
0, 9, 9, 1, 63180, 0x923ba29c
#tb 0: 100/2997 #tb 0: 100/2997
#tb 1: 1/44100
1, 0, 0, 1024, 4096, 0x00000000
1, 1024, 1024, 1024, 4096, 0x4dfae7a6
1, 2048, 2048, 1024, 4096, 0x3fd9f5c6
1, 3072, 3072, 1024, 4096, 0x7b86e310
1, 4096, 4096, 1024, 4096, 0x611cece5
1, 5120, 5120, 1024, 4096, 0xb7d8e872
0, 4, 4, 1, 460800, 0x54aedafe 0, 4, 4, 1, 460800, 0x54aedafe
1, 6144, 6144, 1024, 4096, 0x072ef72b
1, 7168, 7168, 1024, 4096, 0xb3560144
0, 5, 5, 1, 460800, 0xb7aa8b56 0, 5, 5, 1, 460800, 0xb7aa8b56
1, 8192, 8192, 1024, 4096, 0x0a3d119e
0, 6, 6, 1, 460800, 0x283ea3b5 0, 6, 6, 1, 460800, 0x283ea3b5
1, 9216, 9216, 1024, 4096, 0xbe391aa4
1, 10240, 10240, 1024, 4096, 0x28f7c6e5
0, 7, 7, 1, 460800, 0x283ea3b5 0, 7, 7, 1, 460800, 0x283ea3b5
1, 11264, 11264, 1024, 4096, 0xca9d9df2
0, 8, 8, 1, 460800, 0x10e577de 0, 8, 8, 1, 460800, 0x10e577de
1, 12288, 12288, 1024, 4096, 0x5c6b95a9
0, 9, 9, 1, 460800, 0x4e091ee2 0, 9, 9, 1, 460800, 0x4e091ee2
1, 13312, 13312, 1024, 4096, 0x0bdfc0bf
1, 14336, 14336, 1024, 4096, 0xd95a9277
0, 10, 10, 1, 460800, 0x2ea88828 0, 10, 10, 1, 460800, 0x2ea88828
1, 15360, 15360, 1024, 4096, 0xae2bef2c
0, 11, 11, 1, 460800, 0x4b7f4df0 0, 11, 11, 1, 460800, 0x4b7f4df0
1, 16384, 16384, 1024, 4096, 0xbf031e83
1, 17408, 17408, 1024, 4096, 0x4c83e2d1
0, 12, 12, 1, 460800, 0xa57f20d0 0, 12, 12, 1, 460800, 0xa57f20d0
4ef091d638bb20b8eaef5b3a0d6f97b7 *./tests/data/lavf/lavf.ffm 8ce2ea9a73a1187647df7bf3c8e1b8fd *./tests/data/lavf/lavf.ffm
376832 ./tests/data/lavf/lavf.ffm 376832 ./tests/data/lavf/lavf.ffm
./tests/data/lavf/lavf.ffm CRC=0xf361ed74 ./tests/data/lavf/lavf.ffm CRC=0xf361ed74
40e7637e04991dbe9a23fe109f95bfc8 *./tests/data/vsynth1/prores_kostya.mov f8fe98b7f9bb66857c81dbca409a9037 *./tests/data/vsynth1/prores_kostya.mov
3858901 ./tests/data/vsynth1/prores_kostya.mov 3858901 ./tests/data/vsynth1/prores_kostya.mov
0a4153637d0cc0a88a8bcbf04cfaf8c6 *./tests/data/prores_kostya.vsynth1.out.yuv 0a4153637d0cc0a88a8bcbf04cfaf8c6 *./tests/data/prores_kostya.vsynth1.out.yuv
stddev: 3.17 PSNR: 38.09 MAXDIFF: 39 bytes: 7603200/ 7603200 stddev: 3.17 PSNR: 38.09 MAXDIFF: 39 bytes: 7603200/ 7603200
ed8b8a94da049518af8f95c5da736e57 *./tests/data/vsynth2/prores_kostya.mov 26adb18726c08dde23bc4bee2eb591e2 *./tests/data/vsynth2/prores_kostya.mov
3884586 ./tests/data/vsynth2/prores_kostya.mov 3884586 ./tests/data/vsynth2/prores_kostya.mov
ca2f6c1162635dedfa468c90f1fdc0ef *./tests/data/prores_kostya.vsynth2.out.yuv ca2f6c1162635dedfa468c90f1fdc0ef *./tests/data/prores_kostya.vsynth2.out.yuv
stddev: 0.92 PSNR: 48.77 MAXDIFF: 10 bytes: 7603200/ 7603200 stddev: 0.92 PSNR: 48.77 MAXDIFF: 10 bytes: 7603200/ 7603200