Commit b4178a3f authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  rtmp: Support 'rtmp_live', an option which specifies if the media is a live stream.
  av_samples_fill_array: Mark unmodified function argument as const.
  lagarith: add YUY2 decoding support
  Support decoding unaligned rgb24 lagarith.
  dv: Split profile handling code into a separate file.
  flvenc: use AVFormatContext, not AVCodecContext for logging.
  mov: Remove write-only variable in mov_read_chan().
  fate: Change the probe-format refs to match the final text format committed.
  fate: Add avprobe as a make dependency
  Add probe fate tests to test for regressions in detecting media types.
  fate: Add oneline comparison method
  qdm2: clip array indices returned by qdm2_get_vlc().
  avplay: properly close/reopen AVAudioResampleContext on channel layout change
  avcodec: do not needlessly set packet size to 0 in avcodec_encode_audio2()
  avcodec: for audio encoding, reset output packet when it is not valid
  avcodec: refactor avcodec_encode_audio2() to merge common branches
  avcodec: remove fallbacks for AVCodec.encode() in avcodec_encode_audio2()

Conflicts:
	ffplay.c
	libavcodec/Makefile
	libavcodec/dvdata.c
	libavcodec/dvdata.h
	libavcodec/qdm2.c
	libavcodec/utils.c
	libavformat/flvenc.c
	libavformat/mov.c
	tests/Makefile
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents b4b58485 b2e495af
......@@ -265,17 +265,21 @@ static inline void writer_print_chapter_footer(WriterContext *wctx,
static inline void writer_print_section_header(WriterContext *wctx,
const char *section)
{
if (wctx->writer->print_section_header)
wctx->writer->print_section_header(wctx, section);
wctx->nb_item = 0;
if (!fmt_entries_to_show || (section && av_dict_get(fmt_entries_to_show, section, NULL, 0))) {
if (wctx->writer->print_section_header)
wctx->writer->print_section_header(wctx, section);
wctx->nb_item = 0;
}
}
static inline void writer_print_section_footer(WriterContext *wctx,
const char *section)
{
if (wctx->writer->print_section_footer)
wctx->writer->print_section_footer(wctx, section);
wctx->nb_section++;
if (!fmt_entries_to_show || (section && av_dict_get(fmt_entries_to_show, section, NULL, 0))) {
if (wctx->writer->print_section_footer)
wctx->writer->print_section_footer(wctx, section);
wctx->nb_section++;
}
}
static inline void writer_print_integer(WriterContext *wctx,
......@@ -448,7 +452,8 @@ static void default_show_tags(WriterContext *wctx, AVDictionary *dict)
{
AVDictionaryEntry *tag = NULL;
while ((tag = av_dict_get(dict, "", tag, AV_DICT_IGNORE_SUFFIX))) {
printf("TAG:");
if (!fmt_entries_to_show || (tag->key && av_dict_get(fmt_entries_to_show, tag->key, NULL, 0)))
printf("TAG:");
writer_print_string(wctx, tag->key, tag->value, 0);
}
}
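Note on the ffprobe change above: the entry names requested with -show_format_entry are kept in an AVDictionary, and av_dict_get() is used as a membership test, so section headers, footers and tags are only emitted for entries that were actually asked for. A minimal sketch of that idiom follows; the helper name is illustrative and not part of ffprobe itself.

    #include <libavutil/dict.h>

    /* Illustrative helper: non-zero if "name" should be printed.  A NULL
     * dictionary means no filter was given, i.e. print everything. */
    static int entry_is_shown(AVDictionary *requested, const char *name)
    {
        return !requested ||
               (name && av_dict_get(requested, name, NULL, 0) != NULL);
    }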
......
......@@ -146,8 +146,8 @@ OBJS-$(CONFIG_DVBSUB_DECODER) += dvbsubdec.o
OBJS-$(CONFIG_DVBSUB_ENCODER) += dvbsub.o
OBJS-$(CONFIG_DVDSUB_DECODER) += dvdsubdec.o
OBJS-$(CONFIG_DVDSUB_ENCODER) += dvdsubenc.o
OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dv.o dvdata.o
OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o dv_profile.o
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dv.o dvdata.o dv_profile.o
OBJS-$(CONFIG_DXA_DECODER) += dxa.o
OBJS-$(CONFIG_DXTORY_DECODER) += dxtory.o
OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3_data.o
......@@ -609,8 +609,8 @@ OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcmenc.o adpcm_data.o
OBJS-$(CONFIG_ADTS_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_ADX_DEMUXER) += adx.o
OBJS-$(CONFIG_CAF_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_DV_DEMUXER) += dvdata.o
OBJS-$(CONFIG_DV_MUXER) += dvdata.o timecode.o
OBJS-$(CONFIG_DV_DEMUXER) += dv_profile.o
OBJS-$(CONFIG_DV_MUXER) += dv_profile.o timecode.o
OBJS-$(CONFIG_FLAC_DEMUXER) += flacdec.o flacdata.o flac.o vorbis_data.o
OBJS-$(CONFIG_FLAC_MUXER) += flacdec.o flacdata.o flac.o vorbis_data.o
OBJS-$(CONFIG_FLV_DEMUXER) += mpeg4audio.o
......
This diff is collapsed.
/*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_DV_PROFILE_H
#define AVCODEC_DV_PROFILE_H
#include <stdint.h>
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"
#include "avcodec.h"
/* minimum number of bytes to read from a DV stream in order to
determine the profile */
#define DV_PROFILE_BYTES (6*80) /* 6 DIF blocks */
typedef struct DVwork_chunk {
uint16_t buf_offset;
uint16_t mb_coordinates[5];
} DVwork_chunk;
/*
* DVprofile is used to express the differences between various
* DV flavors. For now it's primarily used for differentiating
* 525/60 and 625/50, but the plans are to use it for various
* DV specs as well (e.g. SMPTE314M vs. IEC 61834).
*/
typedef struct DVprofile {
int dsf; /* value of the dsf in the DV header */
int video_stype; /* stype for VAUX source pack */
int frame_size; /* total size of one frame in bytes */
int difseg_size; /* number of DIF segments per DIF channel */
int n_difchan; /* number of DIF channels per frame */
AVRational time_base; /* 1/framerate */
int ltc_divisor; /* FPS from the LTS standpoint */
int height; /* picture height in pixels */
int width; /* picture width in pixels */
AVRational sar[2]; /* sample aspect ratios for 4:3 and 16:9 */
DVwork_chunk *work_chunks; /* each thread gets its own chunk of frame to work on */
uint32_t *idct_factor; /* set of iDCT factor tables */
enum PixelFormat pix_fmt; /* picture pixel format */
int bpm; /* blocks per macroblock */
const uint8_t *block_sizes; /* AC block sizes, in bits */
int audio_stride; /* size of audio_shuffle table */
int audio_min_samples[3]; /* min amount of audio samples */
/* for 48kHz, 44.1kHz and 32kHz */
int audio_samples_dist[5]; /* how many samples are supposed to be */
/* in each frame in a 5 frames window */
const uint8_t (*audio_shuffle)[9]; /* PCM shuffling table */
} DVprofile;
const DVprofile* avpriv_dv_frame_profile(const DVprofile *sys,
const uint8_t* frame, unsigned buf_size);
const DVprofile* avpriv_dv_frame_profile2(AVCodecContext* codec, const DVprofile *sys,
const uint8_t* frame, unsigned buf_size);
const DVprofile* avpriv_dv_codec_profile(AVCodecContext* codec);
/**
* Print all allowed DV profiles into logctx at specified logging level.
*/
void ff_dv_print_profiles(void *logctx, int loglevel);
#endif /* AVCODEC_DV_PROFILE_H */
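For context, the entry points declared in this new header are what the DV decoder and the DV (de)muxer use to detect a stream's profile from the first DIF blocks of a frame. A minimal sketch of the intended call pattern, using only the declarations above (the wrapper function is illustrative, error handling is omitted):

    /* Assumes the declarations from dv_profile.h above.
     * "sys" is the profile found on a previous call, or NULL on the
     * first frame. */
    static const DVprofile *probe_profile(AVCodecContext *avctx,
                                          const DVprofile *sys,
                                          const uint8_t *frame, unsigned size)
    {
        if (size < DV_PROFILE_BYTES)   /* need at least 6 DIF blocks */
            return NULL;
        return avpriv_dv_frame_profile2(avctx, sys, frame, size);
    }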
This diff is collapsed.
......@@ -27,45 +27,10 @@
#ifndef AVCODEC_DVDATA_H
#define AVCODEC_DVDATA_H
#include "libavutil/rational.h"
#include "avcodec.h"
#include "dsputil.h"
#include "get_bits.h"
typedef struct DVwork_chunk {
uint16_t buf_offset;
uint16_t mb_coordinates[5];
} DVwork_chunk;
/*
* DVprofile is used to express the differences between various
* DV flavors. For now it's primarily used for differentiating
* 525/60 and 625/50, but the plans are to use it for various
* DV specs as well (e.g. SMPTE314M vs. IEC 61834).
*/
typedef struct DVprofile {
int dsf; /* value of the dsf in the DV header */
int video_stype; /* stype for VAUX source pack */
int frame_size; /* total size of one frame in bytes */
int difseg_size; /* number of DIF segments per DIF channel */
int n_difchan; /* number of DIF channels per frame */
AVRational time_base; /* 1/framerate */
int ltc_divisor; /* FPS from the LTS standpoint */
int height; /* picture height in pixels */
int width; /* picture width in pixels */
AVRational sar[2]; /* sample aspect ratios for 4:3 and 16:9 */
DVwork_chunk *work_chunks; /* each thread gets its own chunk of frame to work on */
uint32_t *idct_factor; /* set of iDCT factor tables */
enum PixelFormat pix_fmt; /* picture pixel format */
int bpm; /* blocks per macroblock */
const uint8_t *block_sizes; /* AC block sizes, in bits */
int audio_stride; /* size of audio_shuffle table */
int audio_min_samples[3]; /* min amount of audio samples */
/* for 48kHz, 44.1kHz and 32kHz */
int audio_samples_dist[5]; /* how many samples are supposed to be */
/* in each frame in a 5 frames window */
const uint8_t (*audio_shuffle)[9]; /* PCM shuffling table */
} DVprofile;
#include "dv_profile.h"
typedef struct DVVideoContext {
const DVprofile *sys;
......@@ -118,10 +83,6 @@ extern const int ff_dv_iweight_720_c[64];
#define DV_PROFILE_IS_1080i50(p) (((p)->video_stype == 0x14) && ((p)->dsf == 1))
#define DV_PROFILE_IS_720p50(p) (((p)->video_stype == 0x18) && ((p)->dsf == 1))
/* minimum number of bytes to read from a DV stream in order to
determine the profile */
#define DV_PROFILE_BYTES (6*80) /* 6 DIF blocks */
/**
* largest possible DV frame, in bytes (1080i50)
*/
......@@ -136,12 +97,6 @@ extern const int ff_dv_iweight_720_c[64];
extern RL_VLC_ELEM ff_dv_rl_vlc[1184];
const DVprofile* avpriv_dv_frame_profile(const DVprofile *sys,
const uint8_t* frame, unsigned buf_size);
const DVprofile* avpriv_dv_frame_profile2(AVCodecContext* codec, const DVprofile *sys,
const uint8_t* frame, unsigned buf_size);
const DVprofile* avpriv_dv_codec_profile(AVCodecContext* codec);
int ff_dv_init_dynamic_tables(const DVprofile *d);
int ff_dvvideo_init(AVCodecContext *avctx);
......@@ -166,9 +121,4 @@ static inline void dv_calculate_mb_xy(DVVideoContext *s, DVwork_chunk *work_chun
}
}
/**
* Print all allowed DV profiles into logctx at specified logging level.
*/
void ff_dv_print_profiles(void *logctx, int loglevel);
#endif /* AVCODEC_DVDATA_H */
......@@ -269,6 +269,40 @@ static void lag_pred_line(LagarithContext *l, uint8_t *buf,
}
}
static void lag_pred_line_yuy2(LagarithContext *l, uint8_t *buf,
int width, int stride, int line,
int is_luma)
{
int L, TL;
if (!line) {
if (is_luma) {
buf++;
width--;
}
l->dsp.add_hfyu_left_prediction(buf + 1, buf + 1, width - 1, buf[0]);
return;
}
if (line == 1) {
const int HEAD = is_luma ? 4 : 2;
int i;
L = buf[width - stride - 1];
TL = buf[HEAD - stride - 1];
for (i = 0; i < HEAD; i++) {
L += buf[i];
buf[i] = L;
}
buf += HEAD;
width -= HEAD;
} else {
TL = buf[width - (2 * stride) - 1];
L = buf[width - stride - 1];
}
l->dsp.add_hfyu_median_prediction(buf, buf - stride, buf, width,
&L, &TL);
}
static int lag_decode_line(LagarithContext *l, lag_rac *rac,
uint8_t *dst, int width, int stride,
int esc_count)
......@@ -442,9 +476,17 @@ static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
return -1;
}
for (i = 0; i < height; i++) {
lag_pred_line(l, dst, width, stride, i);
dst += stride;
if (l->avctx->pix_fmt != PIX_FMT_YUV422P) {
for (i = 0; i < height; i++) {
lag_pred_line(l, dst, width, stride, i);
dst += stride;
}
} else {
for (i = 0; i < height; i++) {
lag_pred_line_yuy2(l, dst, width, stride, i,
width == l->avctx->width);
dst += stride;
}
}
return 0;
......@@ -566,6 +608,32 @@ static int lag_decode_frame(AVCodecContext *avctx,
srcs[i] += l->rgb_stride;
}
break;
case FRAME_ARITH_YUY2:
avctx->pix_fmt = PIX_FMT_YUV422P;
if (avctx->get_buffer(avctx, p) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
if (offset_ry >= buf_size ||
offset_gu >= buf_size ||
offset_bv >= buf_size) {
av_log(avctx, AV_LOG_ERROR,
"Invalid frame offsets\n");
return AVERROR_INVALIDDATA;
}
lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
p->linesize[0], buf + offset_ry,
buf_size - offset_ry);
lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
avctx->height, p->linesize[2],
buf + offset_gu, buf_size - offset_gu);
lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
avctx->height, p->linesize[1],
buf + offset_bv, buf_size - offset_bv);
break;
case FRAME_ARITH_YV12:
avctx->pix_fmt = PIX_FMT_YUV420P;
......
......@@ -887,10 +887,10 @@ static int synthfilt_build_sb_samples (QDM2Context *q, GetBitContext *gb, int le
case 30:
if (get_bits_left(gb) >= 4) {
unsigned v = qdm2_get_vlc(gb, &vlc_tab_type30, 0, 1);
if (v >= FF_ARRAY_ELEMS(type30_dequant))
unsigned index = qdm2_get_vlc(gb, &vlc_tab_type30, 0, 1);
if (index >= FF_ARRAY_ELEMS(type30_dequant))
return AVERROR_INVALIDDATA;
samples[0] = type30_dequant[v];
samples[0] = type30_dequant[index];
} else
samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
......@@ -905,10 +905,10 @@ static int synthfilt_build_sb_samples (QDM2Context *q, GetBitContext *gb, int le
type34_predictor = samples[0];
type34_first = 0;
} else {
unsigned v = qdm2_get_vlc(gb, &vlc_tab_type34, 0, 1);
if (v >= FF_ARRAY_ELEMS(type34_delta))
unsigned index = qdm2_get_vlc(gb, &vlc_tab_type34, 0, 1);
if (index >= FF_ARRAY_ELEMS(type34_delta))
return AVERROR_INVALIDDATA;
samples[0] = type34_delta[v] / type34_div + type34_predictor;
samples[0] = type34_delta[index] / type34_div + type34_predictor;
type34_predictor = samples[0];
}
} else {
......
......@@ -1008,7 +1008,6 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
{
int ret;
AVPacket user_pkt = *avpkt;
int nb_samples;
int needs_realloc = !user_pkt.data;
*got_packet_ptr = 0;
......@@ -1016,27 +1015,23 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) {
av_free_packet(avpkt);
av_init_packet(avpkt);
avpkt->size = 0;
return 0;
}
/* check for valid frame size */
if (frame) {
nb_samples = frame->nb_samples;
if (avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
if (nb_samples > avctx->frame_size)
if (frame->nb_samples > avctx->frame_size)
return AVERROR(EINVAL);
} else if (!(avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
if (nb_samples != avctx->frame_size)
if (frame->nb_samples != avctx->frame_size)
return AVERROR(EINVAL);
}
} else {
nb_samples = avctx->frame_size;
}
if (avctx->codec->encode2) {
ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
if (!ret && *got_packet_ptr) {
ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
if (!ret) {
if (*got_packet_ptr) {
if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) {
if (avpkt->pts == AV_NOPTS_VALUE)
avpkt->pts = frame->pts;
......@@ -1048,69 +1043,6 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
} else {
avpkt->size = 0;
}
} else {
/* for compatibility with encoders not supporting encode2(), we need to
allocate a packet buffer if the user has not provided one or check
the size otherwise */
int fs_tmp = 0;
int buf_size = avpkt->size;
if (!user_pkt.data) {
if (avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) {
av_assert0(av_get_bits_per_sample(avctx->codec_id) != 0);
if (!frame)
return AVERROR(EINVAL);
buf_size = nb_samples * avctx->channels *
av_get_bits_per_sample(avctx->codec_id) / 8;
} else {
/* this is a guess as to the required size.
if an encoder needs more than this, it should probably
implement encode2() */
buf_size = 2 * avctx->frame_size * avctx->channels *
av_get_bytes_per_sample(avctx->sample_fmt);
buf_size += 2*FF_MIN_BUFFER_SIZE;
}
}
if ((ret = ff_alloc_packet2(avctx, avpkt, buf_size)))
return ret;
/* Encoders using AVCodec.encode() that support
CODEC_CAP_SMALL_LAST_FRAME require avctx->frame_size to be set to
the smaller size when encoding the last frame.
This code can be removed once all encoders supporting
CODEC_CAP_SMALL_LAST_FRAME use encode2() */
if ((avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) &&
nb_samples < avctx->frame_size) {
fs_tmp = avctx->frame_size;
avctx->frame_size = nb_samples;
}
/* encode the frame */
ret = avctx->codec->encode(avctx, avpkt->data, avpkt->size,
frame ? frame->data[0] : NULL);
if (ret >= 0) {
if (!ret) {
/* no output. if the packet data was allocated by libavcodec,
free it */
if (!user_pkt.data && avpkt->data != avctx->internal->byte_buffer)
av_freep(&avpkt->data);
} else {
if (avctx->coded_frame)
avpkt->pts = avpkt->dts = avctx->coded_frame->pts;
/* Set duration for final small packet. This can be removed
once all encoders supporting CODEC_CAP_SMALL_LAST_FRAME use
encode2() */
if (fs_tmp) {
avpkt->duration = ff_samples_to_time_base(avctx,
avctx->frame_size);
}
}
avpkt->size = ret;
*got_packet_ptr = (ret > 0);
ret = 0;
}
if (fs_tmp)
avctx->frame_size = fs_tmp;
}
if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
needs_realloc = 0;
......@@ -1141,8 +1073,11 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
avctx->frame_number++;
}
if (ret < 0 || !*got_packet_ptr)
if (ret < 0 || !*got_packet_ptr) {
av_free_packet(avpkt);
av_init_packet(avpkt);
return ret;
}
/* NOTE: if we add any audio encoders which output non-keyframe packets,
this needs to be moved to the encoders, but for now we can do it
......
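The avcodec_encode_audio2() changes above mainly affect what a caller observes when no packet is produced: the output packet is now reliably freed and reinitialized instead of carrying stale data. A rough sketch of a call site under that API, assuming an already opened encoder context and an input frame (a real caller would mux the packet rather than free it):

    #include <libavcodec/avcodec.h>

    static int encode_one(AVCodecContext *avctx, const AVFrame *frame)
    {
        AVPacket pkt;
        int got_packet = 0, ret;

        av_init_packet(&pkt);
        pkt.data = NULL;   /* let the encoder allocate the buffer */
        pkt.size = 0;

        ret = avcodec_encode_audio2(avctx, &pkt, frame, &got_packet);
        if (ret < 0)
            return ret;    /* pkt has already been reset by the function */
        if (got_packet)
            av_free_packet(&pkt);   /* a real caller would write it out */
        return 0;
    }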
......@@ -31,6 +31,7 @@
#include <time.h>
#include "avformat.h"
#include "internal.h"
#include "libavcodec/dv_profile.h"
#include "libavcodec/dvdata.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
......
......@@ -32,6 +32,7 @@
#include "avformat.h"
#include "internal.h"
#include "libavcodec/dv_profile.h"
#include "libavcodec/dvdata.h"
#include "dv.h"
#include "libavutil/fifo.h"
......
......@@ -68,18 +68,19 @@ typedef struct FLVStreamContext {
int64_t last_ts; ///< last timestamp for each stream
} FLVStreamContext;
static int get_audio_flags(AVCodecContext *enc){
static int get_audio_flags(AVFormatContext *s, AVCodecContext *enc)
{
int flags = (enc->bits_per_coded_sample == 16) ? FLV_SAMPLESSIZE_16BIT : FLV_SAMPLESSIZE_8BIT;
if (enc->codec_id == CODEC_ID_AAC) // specs force these parameters
return FLV_CODECID_AAC | FLV_SAMPLERATE_44100HZ | FLV_SAMPLESSIZE_16BIT | FLV_STEREO;
else if (enc->codec_id == CODEC_ID_SPEEX) {
if (enc->sample_rate != 16000) {
av_log(enc, AV_LOG_ERROR, "flv only supports wideband (16kHz) Speex audio\n");
av_log(s, AV_LOG_ERROR, "flv only supports wideband (16kHz) Speex audio\n");
return -1;
}
if (enc->channels != 1) {
av_log(enc, AV_LOG_ERROR, "flv only supports mono Speex audio\n");
av_log(s, AV_LOG_ERROR, "flv only supports mono Speex audio\n");
return -1;
}
return FLV_CODECID_SPEEX | FLV_SAMPLERATE_11025HZ | FLV_SAMPLESSIZE_16BIT;
......@@ -102,7 +103,7 @@ static int get_audio_flags(AVCodecContext *enc){
break;
}
default:
av_log(enc, AV_LOG_ERROR, "flv does not support that sample rate, choose from (44100, 22050, 11025).\n");
av_log(s, AV_LOG_ERROR, "flv does not support that sample rate, choose from (44100, 22050, 11025).\n");
return -1;
}
}
......@@ -140,7 +141,7 @@ static int get_audio_flags(AVCodecContext *enc){
flags |= enc->codec_tag<<4;
break;
default:
av_log(enc, AV_LOG_ERROR, "codec not compatible with flv\n");
av_log(s, AV_LOG_ERROR, "codec not compatible with flv\n");
return -1;
}
......@@ -198,12 +199,12 @@ static int flv_write_header(AVFormatContext *s)
}
video_enc = enc;
if(enc->codec_tag == 0) {
av_log(enc, AV_LOG_ERROR, "video codec not compatible with flv\n");
av_log(s, AV_LOG_ERROR, "video codec not compatible with flv\n");
return -1;
}
} else if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
audio_enc = enc;
if(get_audio_flags(enc)<0)
if (get_audio_flags(s, enc) < 0)
return -1;
}
avpriv_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */
......@@ -344,7 +345,7 @@ static int flv_write_header(AVFormatContext *s)
avio_wb24(pb, 0); // streamid
pos = avio_tell(pb);
if (enc->codec_id == CODEC_ID_AAC) {
avio_w8(pb, get_audio_flags(enc));
avio_w8(pb, get_audio_flags(s, enc));
avio_w8(pb, 0); // AAC sequence header
avio_write(pb, enc->extradata, enc->extradata_size);
} else {
......@@ -420,13 +421,13 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
flags = enc->codec_tag;
if(flags == 0) {
av_log(enc, AV_LOG_ERROR, "video codec %s not compatible with flv\n", avcodec_get_name(enc->codec_id));
av_log(s, AV_LOG_ERROR, "video codec %s not compatible with flv\n", avcodec_get_name(enc->codec_id));
return -1;
}
flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
} else if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
flags = get_audio_flags(enc);
flags = get_audio_flags(s, enc);
assert(size);
......
......@@ -655,9 +655,9 @@ static int mov_read_chan(MOVContext *c, AVIOContext *pb, MOVAtom atom)
label_mask = 0;
for (i = 0; i < num_descr; i++) {
uint32_t av_unused label, cflags;
uint32_t label;
label = avio_rb32(pb); // mChannelLabel
cflags = avio_rb32(pb); // mChannelFlags
avio_rb32(pb); // mChannelFlags
avio_rl32(pb); // mCoordinates[0]
avio_rl32(pb); // mCoordinates[1]
avio_rl32(pb); // mCoordinates[2]
......
......@@ -66,6 +66,7 @@ typedef struct RTMPContext {
int chunk_size; ///< size of the chunks RTMP packets are divided into
int is_input; ///< input/output flag
char *playpath; ///< stream identifier to play (with possible "mp4:" prefix)
int live; ///< 0: recorded, -1: live, -2: both
char *app; ///< name of application
ClientState state; ///< current state
int main_channel_id; ///< an additional channel ID which is used for some invocations
......@@ -287,7 +288,7 @@ static void gen_play(URLContext *s, RTMPContext *rt)
av_log(s, AV_LOG_DEBUG, "Sending play command for '%s'\n", rt->playpath);
ff_rtmp_packet_create(&pkt, RTMP_VIDEO_CHANNEL, RTMP_PT_INVOKE, 0,
20 + strlen(rt->playpath));
29 + strlen(rt->playpath));
pkt.extra = rt->main_channel_id;
p = pkt.data;
......@@ -295,6 +296,7 @@ static void gen_play(URLContext *s, RTMPContext *rt)
ff_amf_write_number(&p, ++rt->nb_invokes);
ff_amf_write_null(&p);
ff_amf_write_string(&p, rt->playpath);
ff_amf_write_number(&p, rt->live);
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
......@@ -1050,6 +1052,10 @@ static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
static const AVOption rtmp_options[] = {
{"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {-2}, INT_MIN, INT_MAX, DEC, "rtmp_live"},
{"any", "both", 0, AV_OPT_TYPE_CONST, {-2}, 0, 0, DEC, "rtmp_live"},
{"live", "live stream", 0, AV_OPT_TYPE_CONST, {-1}, 0, 0, DEC, "rtmp_live"},
{"recorded", "recorded stream", 0, AV_OPT_TYPE_CONST, {0}, 0, 0, DEC, "rtmp_live"},
{"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{ NULL },
};
......
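The rtmp_live option added above is a protocol-private option, so a caller can request the live variant of a stream through an options dictionary. A sketch of that, with an illustrative URL; it assumes the dictionary is forwarded down to the protocol layer, as avformat_open_input() does in current FFmpeg:

    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>

    static int open_live_rtmp(AVFormatContext **fmt)
    {
        AVDictionary *opts = NULL;
        int ret;

        av_dict_set(&opts, "rtmp_live", "live", 0);   /* or "recorded" / "any" */
        ret = avformat_open_input(fmt, "rtmp://example.com/app/stream",
                                  NULL, &opts);
        av_dict_free(&opts);
        return ret;
    }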
......@@ -152,7 +152,7 @@ int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
}
int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
uint8_t *buf, int nb_channels, int nb_samples,
const uint8_t *buf, int nb_channels, int nb_samples,
enum AVSampleFormat sample_fmt, int align)
{
int ch, planar, buf_size, line_size;
......
......@@ -178,7 +178,8 @@ int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
* @param align buffer size alignment (0 = default, 1 = no alignment)
* @return 0 on success or a negative error code on failure
*/
int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, uint8_t *buf,
int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
const uint8_t *buf,
int nb_channels, int nb_samples,
enum AVSampleFormat sample_fmt, int align);
......
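The const qualifier added above only tightens the documented contract: av_samples_fill_arrays() never writes into buf, it merely computes pointers into it. A small sketch of the typical use, exposing an already interleaved S16 stereo buffer through the data-pointer/linesize layout without copying (sizes are illustrative):

    #include <libavutil/samplefmt.h>

    static int wrap_s16_buffer(void)
    {
        static uint8_t buf[1024 * 2 * 2];   /* 1024 samples, 2 ch, 2 bytes */
        uint8_t *data[1];                   /* packed formats use one plane */
        int linesize;

        /* On success, data[0] points into buf and linesize holds the size
         * of the single interleaved plane; returns 0 or a negative error. */
        return av_samples_fill_arrays(data, &linesize, buf,
                                      2, 1024, AV_SAMPLE_FMT_S16, 0);
    }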
......@@ -62,6 +62,7 @@ include $(SRC_PATH)/tests/fate/microsoft.mak
include $(SRC_PATH)/tests/fate/mp3.mak
include $(SRC_PATH)/tests/fate/mpc.mak
include $(SRC_PATH)/tests/fate/pcm.mak
include $(SRC_PATH)/tests/fate/probe.mak
include $(SRC_PATH)/tests/fate/prores.mak
include $(SRC_PATH)/tests/fate/qt.mak
include $(SRC_PATH)/tests/fate/qtrle.mak
......@@ -138,7 +139,7 @@ TOOL = ffmpeg
fate:: $(FATE)
$(FATE) $(FATE_TESTS-no): $(TOOL)$(EXESUF) $(FATE_UTILS:%=tests/%$(HOSTEXESUF))
$(FATE) $(FATE_TESTS-no): ffmpeg$(EXESUF) ffprobe$(EXESUF) $(FATE_UTILS:%=tests/%$(HOSTEXESUF))
@echo "TEST $(@:fate-%=%)"
$(Q)$(SRC_PATH)/tests/fate-run.sh $@ "$(SAMPLES)" "$(TARGET_EXEC)" "$(TARGET_PATH)" '$(CMD)' '$(CMP)' '$(REF)' '$(FUZZ)' '$(THREADS)' '$(THREAD_TYPE)' '$(CPUFLAGS)' '$(CMP_SHIFT)' '$(CMP_TARGET)' '$(SIZE_TOLERANCE)'
......
......@@ -60,11 +60,21 @@ stddev(){
do_tiny_psnr "$1" "$2" stddev
}
oneline(){
val=$(cat "$2")
test x"$val" = x"$1" || { r=$?; printf -- '-%s\n+%s\n' "$ref" "$val"; }
return ${r:-0}
}
run(){
test "${V:-0}" -gt 0 && echo "$target_exec" $target_path/"$@" >&3
$target_exec $target_path/"$@"
}
probefmt(){
run ffprobe -show_format_entry format_name -v 0 "$@"
}
avconv(){
run ffmpeg -nostats -threads $threads -thread_type $thread_type -cpuflags $cpuflags "$@"
}
......@@ -151,11 +161,12 @@ if [ $err -gt 128 ]; then
test "${sig}" = "${sig%[!A-Za-z]*}" || unset sig
fi
if test -e "$ref"; then
if test -e "$ref" || test $cmp = "oneline" ; then
case $cmp in
diff) diff -u -w "$ref" "$outfile" >$cmpfile ;;
oneoff) oneoff "$ref" "$outfile" >$cmpfile ;;
stddev) stddev "$ref" "$outfile" >$cmpfile ;;
oneline)oneline "$ref" "$outfile" >$cmpfile ;;
null) cat "$outfile" >$cmpfile ;;
esac
cmperr=$?
......
FATE_PROBE_FORMAT += fate-probe-format-roundup997
fate-probe-format-roundup997: REF = format_name=mpeg
FATE_PROBE_FORMAT += fate-probe-format-roundup1383
fate-probe-format-roundup1383: REF = format_name=mp3
FATE_PROBE_FORMAT += fate-probe-format-roundup1414
fate-probe-format-roundup1414: REF = format_name=mpeg
FATE_PROBE_FORMAT += fate-probe-format-roundup2015
fate-probe-format-roundup2015: REF = format_name=dv
FATE_TESTS += $(FATE_PROBE_FORMAT)
fate-probe-format: $(FATE_PROBE_FORMAT)
$(FATE_PROBE_FORMAT): CMP = oneline
fate-probe-format-%: CMD = probefmt $(SAMPLES)/probe-format/$(@:fate-probe-format-%=%)