Commit 82edf672 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  lavr: add x86-optimized functions for mixing 1-to-2 s16p with flt coeffs
  lavr: add x86-optimized functions for mixing 1-to-2 fltp with flt coeffs
  Add Dolby/DPLII downmix support to libavresample
  vorbisdec: replace div/mod in loop with a counter
  fate: vorbis: add 5.1 surround test
  rtpenc: Allow requesting H264 RTP packetization mode 0
  configure: Sort the library listings in the help text alphabetically
  dwt: remove variable-length arrays
  RTMPT protocol support
  http: Properly handle chunked transfer-encoding for replies to post data
  http: Fail reading if the connection has gone away
  amr: Mark an array const
  amr: More space cleanup
  rtpenc: Fix memory leaks in the muxer open function

Conflicts:
	Changelog
	configure
	doc/APIchanges
	libavformat/version.h
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents f87dacb2 f61ce90c
@@ -9,6 +9,7 @@ version next:
- asetnsamples audio filter
- atempo filter
- ffprobe -show_data option
- RTMPT protocol support
version 0.11:
......
@@ -174,9 +174,6 @@ External library support:
--enable-libass enable libass subtitles rendering [no]
--enable-libbluray enable BluRay reading using libbluray [no]
--enable-libcelt enable CELT decoding via libcelt [no]
--enable-libopencore-amrnb enable AMR-NB de/encoding via libopencore-amrnb [no]
--enable-libopencore-amrwb enable AMR-WB decoding via libopencore-amrwb [no]
--enable-libopencv enable video filtering via libopencv [no]
--enable-libcdio enable audio CD grabbing with libcdio
--enable-libdc1394 enable IIDC-1394 grabbing using libdc1394
and libraw1394 [no]
@@ -187,6 +184,9 @@ External library support:
--enable-libmp3lame enable MP3 encoding via libmp3lame [no]
--enable-libnut enable NUT (de)muxing via libnut,
native (de)muxer exists [no]
--enable-libopencore-amrnb enable AMR-NB de/encoding via libopencore-amrnb [no]
--enable-libopencore-amrwb enable AMR-WB decoding via libopencore-amrwb [no]
--enable-libopencv enable video filtering via libopencv [no]
--enable-libopenjpeg enable JPEG 2000 encoding/decoding via OpenJPEG [no]
--enable-libpulse enable Pulseaudio input via libpulse [no]
--enable-librtmp enable RTMP[E] support via librtmp [no]
@@ -1689,6 +1689,10 @@ mmsh_protocol_select="http_protocol"
mmst_protocol_deps="network"
rtmp_protocol_deps="!librtmp_protocol"
rtmp_protocol_select="tcp_protocol"
rtmphttp_protocol_deps="!librtmp_protocol"
rtmphttp_protocol_select="http_protocol"
rtmpt_protocol_deps="!librtmp_protocol"
rtmpt_protocol_select="rtmphttp_protocol"
rtp_protocol_select="udp_protocol"
sctp_protocol_deps="network netinet_sctp_h"
tcp_protocol_deps="network"
......
@@ -38,6 +38,9 @@ API changes, most recent first:
2012-03-26 - a67d9cf - lavfi 2.66.100
Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions.
2012-xx-xx - xxxxxxx - lavr 0.0.3
Add a parameter to avresample_build_matrix() for Dolby/DPLII downmixing.
2012-xx-xx - xxxxxxx - lavfi 2.23.0 - avfilter.h
Add AVFilterContext.nb_inputs/outputs. Deprecate
AVFilterContext.input/output_count.
......
@@ -273,6 +273,14 @@ For example to read with @command{ffplay} a multimedia resource named
ffplay rtmp://myserver/vod/sample
@end example
@section rtmpt
Real-Time Messaging Protocol tunneled through HTTP.
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
for streaming multimedia content within HTTP requests to traverse
firewalls.
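For example, to read with @command{ffplay} the same (hypothetical) resource as in the rtmp example above, but tunneled over HTTP:
@example
ffplay rtmpt://myserver/vod/sample
@end example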
@section rtmp, rtmpe, rtmps, rtmpt, rtmpte
Real-Time Messaging Protocol and its variants supported through
......
This diff is collapsed.
@@ -79,7 +79,7 @@ typedef struct DWTContext {
void (*vertical_compose97i)(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5,
int width);
void (*horizontal_compose97i)(IDWTELEM *b, int width);
void (*horizontal_compose97i)(IDWTELEM *b, IDWTELEM *temp, int width);
void (*inner_add_yblock)(const uint8_t *obmc, const int obmc_stride,
uint8_t **block, int b_w, int b_h, int src_x,
int src_y, int src_stride, slice_buffer *sb,
@@ -239,7 +239,7 @@ IDWTELEM *ff_slice_buffer_load_line(slice_buffer *buf, int line);
void ff_snow_vertical_compose97i(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5,
int width);
void ff_snow_horizontal_compose97i(IDWTELEM *b, int width);
void ff_snow_horizontal_compose97i(IDWTELEM *b, IDWTELEM *temp, int width);
void ff_snow_inner_add_yblock(const uint8_t *obmc, const int obmc_stride,
uint8_t **block, int b_w, int b_h, int src_x,
int src_y, int src_stride, slice_buffer *sb,
@@ -248,18 +248,18 @@ void ff_snow_inner_add_yblock(const uint8_t *obmc, const int obmc_stride,
int ff_w53_32_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int ff_w97_32_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
void ff_spatial_dwt(int *buffer, int width, int height, int stride, int type,
int decomposition_count);
void ff_spatial_dwt(int *buffer, int *temp, int width, int height, int stride,
int type, int decomposition_count);
void ff_spatial_idwt_buffered_init(DWTCompose *cs, slice_buffer *sb, int width,
int height, int stride_line, int type,
int decomposition_count);
void ff_spatial_idwt_buffered_slice(DWTContext *dsp, DWTCompose *cs,
slice_buffer *slice_buf, int width,
int height, int stride_line, int type,
int decomposition_count, int y);
void ff_spatial_idwt(IDWTELEM *buffer, int width, int height, int stride,
int type, int decomposition_count);
slice_buffer *slice_buf, IDWTELEM *temp,
int width, int height, int stride_line,
int type, int decomposition_count, int y);
void ff_spatial_idwt(IDWTELEM *buffer, IDWTELEM *temp, int width, int height,
int stride, int type, int decomposition_count);
void ff_dwt_init(DWTContext *c);
void ff_dwt_init_x86(DWTContext *c);
......
@@ -440,6 +440,8 @@ av_cold int ff_snow_common_init(AVCodecContext *avctx){
s->spatial_idwt_buffer= av_mallocz(width*height*sizeof(IDWTELEM));
s->spatial_dwt_buffer= av_mallocz(width*height*sizeof(DWTELEM)); //FIXME this does not belong here
s->temp_dwt_buffer = av_mallocz(width * sizeof(DWTELEM));
s->temp_idwt_buffer = av_mallocz(width * sizeof(IDWTELEM));
for(i=0; i<MAX_REF_FRAMES; i++)
for(j=0; j<MAX_REF_FRAMES; j++)
@@ -618,7 +620,9 @@ av_cold void ff_snow_common_end(SnowContext *s)
int plane_index, level, orientation, i;
av_freep(&s->spatial_dwt_buffer);
av_freep(&s->temp_dwt_buffer);
av_freep(&s->spatial_idwt_buffer);
av_freep(&s->temp_idwt_buffer);
s->m.me.temp= NULL;
av_freep(&s->m.me.scratchpad);
......
@@ -132,7 +132,9 @@ typedef struct SnowContext{
int16_t (*ref_mvs[MAX_REF_FRAMES])[2];
uint32_t *ref_scores[MAX_REF_FRAMES];
DWTELEM *spatial_dwt_buffer;
DWTELEM *temp_dwt_buffer;
IDWTELEM *spatial_idwt_buffer;
IDWTELEM *temp_idwt_buffer;
int colorspace_type;
int chroma_h_shift;
int chroma_v_shift;
......
@@ -502,7 +502,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
}
for(; yd<slice_h; yd+=4){
ff_spatial_idwt_buffered_slice(&s->dwt, cs, &s->sb, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count, yd);
ff_spatial_idwt_buffered_slice(&s->dwt, cs, &s->sb, s->temp_idwt_buffer, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count, yd);
}
if(s->qlog == LOSSLESS_QLOG){
......
@@ -94,7 +94,7 @@ static void dwt_quantize(SnowContext *s, Plane *p, DWTELEM *buffer, int width, i
//FIXME pass the copy cleanly ?
// memcpy(dwt_buffer, buffer, height * stride * sizeof(DWTELEM));
ff_spatial_dwt(buffer, width, height, stride, type, s->spatial_decomposition_count);
ff_spatial_dwt(buffer, s->temp_dwt_buffer, width, height, stride, type, s->spatial_decomposition_count);
for(level=0; level<s->spatial_decomposition_count; level++){
for(orientation=level ? 1 : 0; orientation<4; orientation++){
@@ -119,7 +119,7 @@ static void dwt_quantize(SnowContext *s, Plane *p, DWTELEM *buffer, int width, i
for(xs= 0; xs<Q2_STEP; xs++){
memcpy(idwt2_buffer, best_dequant, height * stride * sizeof(IDWTELEM));
dequantize_all(s, p, idwt2_buffer, width, height);
ff_spatial_idwt(idwt2_buffer, width, height, stride, type, s->spatial_decomposition_count);
ff_spatial_idwt(idwt2_buffer, s->temp_idwt_buffer, width, height, stride, type, s->spatial_decomposition_count);
find_sse(s, p, best_score, score_stride, idwt2_buffer, s->spatial_idwt_buffer, level, orientation);
memcpy(idwt2_buffer, best_dequant, height * stride * sizeof(IDWTELEM));
for(y=ys; y<b->height; y+= Q2_STEP){
@@ -130,7 +130,7 @@ static void dwt_quantize(SnowContext *s, Plane *p, DWTELEM *buffer, int width, i
}
}
dequantize_all(s, p, idwt2_buffer, width, height);
ff_spatial_idwt(idwt2_buffer, width, height, stride, type, s->spatial_decomposition_count);
ff_spatial_idwt(idwt2_buffer, s->temp_idwt_buffer, width, height, stride, type, s->spatial_decomposition_count);
find_sse(s, p, score, score_stride, idwt2_buffer, s->spatial_idwt_buffer, level, orientation);
for(y=ys; y<b->height; y+= Q2_STEP){
for(x=xs; x<b->width; x+= Q2_STEP){
@@ -1588,7 +1588,7 @@ static void calculate_visual_weight(SnowContext *s, Plane *p){
memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height);
ibuf[b->width/2 + b->height/2*b->stride]= 256*16;
ff_spatial_idwt(s->spatial_idwt_buffer, width, height, width, s->spatial_decomposition_type, s->spatial_decomposition_count);
ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, width, height, width, s->spatial_decomposition_type, s->spatial_decomposition_count);
for(y=0; y<height; y++){
for(x=0; x<width; x++){
int64_t d= s->spatial_idwt_buffer[x + y*width]*16;
@@ -1778,7 +1778,7 @@ redo_frame:
/* if(QUANTIZE2)
dwt_quantize(s, p, s->spatial_dwt_buffer, w, h, w, s->spatial_decomposition_type);
else*/
ff_spatial_dwt(s->spatial_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
ff_spatial_dwt(s->spatial_dwt_buffer, s->temp_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
if(s->pass1_rc && plane_index==0){
int delta_qlog = ratecontrol_1pass(s, pic);
@@ -1818,7 +1818,7 @@ redo_frame:
}
}
ff_spatial_idwt(s->spatial_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
if(s->qlog == LOSSLESS_QLOG){
for(y=0; y<h; y++){
for(x=0; x<w; x++){
......
@@ -1413,17 +1413,24 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
}
} else if (vr_type == 2) {
voffs = voffset;
unsigned voffs_div = FASTDIV(voffset, ch);
unsigned voffs_mod = voffset - voffs_div * ch;
for (k = 0; k < step; ++k) {
coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
for (l = 0; l < dim; ++l, ++voffs) {
vec[voffs / ch + (voffs % ch) * vlen] += codebook.codevectors[coffs + l]; // FPMATH FIXME use if and counter instead of / and %
for (l = 0; l < dim; ++l) {
vec[voffs_div + voffs_mod * vlen] +=
codebook.codevectors[coffs + l];
av_dlog(NULL, " pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \n",
pass, voffset / ch + (voffs % ch) * vlen,
vec[voffset / ch + (voffs % ch) * vlen],
pass, voffs_div + voffs_mod * vlen,
vec[voffs_div + voffs_mod * vlen],
codebook.codevectors[coffs + l], coffs, l);
if (++voffs_mod == ch) {
voffs_div++;
voffs_mod = 0;
}
}
}
}
......
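The change above is a classic strength reduction: because the offset advances by one each iteration, its quotient and remainder by ch can be carried incrementally instead of recomputed with / and %. A minimal standalone C sketch of the same transformation, with toy values rather than the decoder's actual buffers:

    #include <stdio.h>

    int main(void)
    {
        const int ch = 3, n = 12;
        int div = 0, mod = 0;

        for (int voffs = 0; voffs < n; voffs++) {
            /* invariant: div == voffs / ch and mod == voffs % ch */
            printf("%2d -> div %d mod %d\n", voffs, div, mod);
            if (++mod == ch) { /* wrap the remainder, bump the quotient */
                mod = 0;
                div++;
            }
        }
        return 0;
    }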
@@ -26,9 +26,8 @@
#include "libavcodec/dwt.h"
#include "dsputil_mmx.h"
static void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){
static void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, IDWTELEM *temp, int width){
const int w2= (width+1)>>1;
DECLARE_ALIGNED(16, IDWTELEM, temp)[width>>1];
const int w_l= (width>>1);
const int w_r= w2 - 1;
int i;
@@ -215,9 +214,8 @@ static void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){
}
}
static void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width){
static void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, IDWTELEM *temp, int width){
const int w2= (width+1)>>1;
IDWTELEM temp[width >> 1];
const int w_l= (width>>1);
const int w_r= w2 - 1;
int i;
......
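Both SIMD hunks above follow the pattern that the "dwt: remove variable-length arrays" commit applies throughout: a stack VLA sized by the runtime width is replaced by scratch space the caller allocates once (the av_mallocz() calls added to ff_snow_common_init above). A schematic before/after, with a hypothetical function and the usual dwt.h element type assumed:

    #include <stdint.h>

    typedef int16_t IDWTELEM; /* assumed to match the dwt.h definition */

    /* before (sketch): IDWTELEM temp[width >> 1]; -- a VLA whose stack
     * footprint is unbounded for large frame widths */

    /* after: the caller owns a reusable scratch buffer and passes it in */
    static void compose(IDWTELEM *b, IDWTELEM *temp, int width)
    {
        for (int i = 0; i < width >> 1; i++)
            temp[i] = b[2 * i]; /* stand-in for the real lifting steps */
    }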
@@ -373,6 +373,8 @@ OBJS-$(CONFIG_MMST_PROTOCOL) += mmst.o mms.o asf.o
OBJS-$(CONFIG_MD5_PROTOCOL) += md5proto.o
OBJS-$(CONFIG_PIPE_PROTOCOL) += file.o
OBJS-$(CONFIG_RTMP_PROTOCOL) += rtmpproto.o rtmppkt.o
OBJS-$(CONFIG_RTMPHTTP_PROTOCOL) += rtmphttp.o
OBJS-$(CONFIG_RTMPT_PROTOCOL) += rtmpproto.o rtmppkt.o
OBJS-$(CONFIG_RTP_PROTOCOL) += rtpproto.o
OBJS-$(CONFIG_SCTP_PROTOCOL) += sctp.o
OBJS-$(CONFIG_TCP_PROTOCOL) += tcp.o
......
@@ -277,6 +277,8 @@ void av_register_all(void)
REGISTER_PROTOCOL (MD5, md5);
REGISTER_PROTOCOL (PIPE, pipe);
REGISTER_PROTOCOL (RTMP, rtmp);
REGISTER_PROTOCOL (RTMPHTTP, rtmphttp);
REGISTER_PROTOCOL (RTMPT, rtmpt);
REGISTER_PROTOCOL (RTP, rtp);
REGISTER_PROTOCOL (SCTP, sctp);
REGISTER_PROTOCOL (TCP, tcp);
......
@@ -124,8 +124,8 @@ static int amr_read_packet(AVFormatContext *s, AVPacket *pkt)
};
size = packed_size[mode] + 1;
} else if(enc->codec_id == CODEC_ID_AMR_WB) {
static uint8_t packed_size[16] = {
} else if (enc->codec_id == CODEC_ID_AMR_WB) {
static const uint8_t packed_size[16] = {
18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1
};
......
@@ -352,6 +352,8 @@ static int http_read_header(URLContext *h, int *new_location)
char line[1024];
int err = 0;
s->chunksize = -1;
for (;;) {
if ((err = http_get_line(s, line, sizeof(line))) < 0)
return err;
@@ -470,7 +472,6 @@ static int http_connect(URLContext *h, const char *path, const char *local_path,
s->http_code = 200;
return 0;
}
s->chunksize = -1;
/* wait for header */
err = http_read_header(h, new_location);
@@ -510,14 +511,13 @@ static int http_read(URLContext *h, uint8_t *buf, int size)
HTTPContext *s = h->priv_data;
int err, new_location;
if (s->end_chunked_post) {
if (!s->end_header) {
err = http_read_header(h, &new_location);
if (err < 0)
return err;
}
if (!s->hd)
return AVERROR_EOF;
return http_buf_read(h, buf, size);
if (s->end_chunked_post && !s->end_header) {
err = http_read_header(h, &new_location);
if (err < 0)
return err;
}
if (s->chunksize >= 0) {
......
/*
* RTMP HTTP network protocol
* Copyright (c) 2012 Samuel Pitoiset
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* RTMP HTTP protocol
*/
#include "libavutil/avstring.h"
#include "libavutil/intfloat.h"
#include "libavutil/opt.h"
#include "internal.h"
#include "http.h"
#define RTMPT_DEFAULT_PORT 80
/* protocol handler context */
typedef struct RTMP_HTTPContext {
URLContext *stream; ///< HTTP stream
char host[256]; ///< hostname of the server
int port; ///< port to connect (default is 80)
char client_id[64]; ///< client ID used for all requests except the first one
int seq; ///< sequence ID used for all requests
uint8_t *out_data; ///< output buffer
int out_size; ///< current output buffer size
int out_capacity; ///< current output buffer capacity
int initialized; ///< flag indicating when the http context is initialized
int finishing; ///< flag indicating when the client closes the connection
} RTMP_HTTPContext;
static int rtmp_http_send_cmd(URLContext *h, const char *cmd)
{
RTMP_HTTPContext *rt = h->priv_data;
char uri[2048];
uint8_t c;
int ret;
ff_url_join(uri, sizeof(uri), "http", NULL, rt->host, rt->port,
"/%s/%s/%d", cmd, rt->client_id, rt->seq++);
av_opt_set_bin(rt->stream->priv_data, "post_data", rt->out_data,
rt->out_size, 0);
/* send a new request to the server */
if ((ret = ff_http_do_new_request(rt->stream, uri)) < 0)
return ret;
/* re-init output buffer */
rt->out_size = 0;
/* read the first byte which contains the polling interval */
if ((ret = ffurl_read(rt->stream, &c, 1)) < 0)
return ret;
return ret;
}
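Together with rtmp_http_open() below, every request this helper issues is an HTTP POST whose path encodes the command, the server-assigned client ID, and an increasing sequence number. An illustrative exchange (client ID and sequence numbers invented for the example):

    POST /open/1            -> server replies with a client ID, e.g. "1728"
    POST /idle/1728/<seq>   -> poll while no RTMP data is buffered
    POST /send/1728/<seq>   -> flush buffered RTMP packets to the server
    POST /close/1728/<seq>  -> tear the session down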
static int rtmp_http_write(URLContext *h, const uint8_t *buf, int size)
{
RTMP_HTTPContext *rt = h->priv_data;
void *ptr;
if (rt->out_size + size > rt->out_capacity) {
rt->out_capacity = (rt->out_size + size) * 2;
ptr = av_realloc(rt->out_data, rt->out_capacity);
if (!ptr)
return AVERROR(ENOMEM);
rt->out_data = ptr;
}
memcpy(rt->out_data + rt->out_size, buf, size);
rt->out_size += size;
return size;
}
static int rtmp_http_read(URLContext *h, uint8_t *buf, int size)
{
RTMP_HTTPContext *rt = h->priv_data;
int ret, off = 0;
/* try to read at least 1 byte of data */
do {
ret = ffurl_read(rt->stream, buf + off, size);
if (ret < 0 && ret != AVERROR_EOF)
return ret;
if (ret == AVERROR_EOF) {
if (rt->finishing) {
/* Do not send new requests when the client wants to
* close the connection. */
return AVERROR(EAGAIN);
}
/* When the client has reached end of file for the last request,
* we have to send a new request if we have buffered data.
* Otherwise, we have to send an idle POST. */
if (rt->out_size > 0) {
if ((ret = rtmp_http_send_cmd(h, "send")) < 0)
return ret;
} else {
if ((ret = rtmp_http_write(h, "", 1)) < 0)
return ret;
if ((ret = rtmp_http_send_cmd(h, "idle")) < 0)
return ret;
}
if (h->flags & AVIO_FLAG_NONBLOCK) {
/* no incoming data to handle in nonblocking mode */
return AVERROR(EAGAIN);
}
} else {
off += ret;
size -= ret;
}
} while (off <= 0);
return off;
}
static int rtmp_http_close(URLContext *h)
{
RTMP_HTTPContext *rt = h->priv_data;
uint8_t tmp_buf[2048];
int ret = 0;
if (rt->initialized) {
/* client wants to close the connection */
rt->finishing = 1;
do {
ret = rtmp_http_read(h, tmp_buf, sizeof(tmp_buf));
} while (ret > 0);
/* re-init output buffer before sending the close command */
rt->out_size = 0;
if ((ret = rtmp_http_write(h, "", 1)) == 1)
ret = rtmp_http_send_cmd(h, "close");
}
av_freep(&rt->out_data);
ffurl_close(rt->stream);
return ret;
}
static int rtmp_http_open(URLContext *h, const char *uri, int flags)
{
RTMP_HTTPContext *rt = h->priv_data;
char headers[1024], url[1024];
int ret, off = 0;
av_url_split(NULL, 0, NULL, 0, rt->host, sizeof(rt->host), &rt->port,
NULL, 0, uri);
if (rt->port < 0)
rt->port = RTMPT_DEFAULT_PORT;
/* This is the first request that is sent to the server in order to
* register a client on the server and start a new session. The server
* replies with a unique id (usually a number) that is used by the client
* for all future requests.
* Note: the reply doesn't contain a value for the polling interval.
* A successful connect resets the consecutive index that is used
* in the URLs. */
ff_url_join(url, sizeof(url), "http", NULL, rt->host, rt->port, "/open/1");
/* alloc the http context */
if ((ret = ffurl_alloc(&rt->stream, url, AVIO_FLAG_READ_WRITE, NULL)) < 0)
goto fail;
/* set options */
snprintf(headers, sizeof(headers),
"Cache-Control: no-cache\r\n"
"Content-type: application/x-fcs\r\n"
"User-Agent: Shockwave Flash\r\n");
av_opt_set(rt->stream->priv_data, "headers", headers, 0);
av_opt_set(rt->stream->priv_data, "multiple_requests", "1", 0);
av_opt_set_bin(rt->stream->priv_data, "post_data", "", 1, 0);
/* open the http context */
if ((ret = ffurl_connect(rt->stream, NULL)) < 0)
goto fail;
/* read the server reply which contains a unique ID */
for (;;) {
ret = ffurl_read(rt->stream, rt->client_id + off, sizeof(rt->client_id) - off);
if (ret == AVERROR_EOF)
break;
if (ret < 0)
goto fail;
off += ret;
if (off == sizeof(rt->client_id)) {
ret = AVERROR(EIO);
goto fail;
}
}
while (off > 0 && isspace(rt->client_id[off - 1]))
off--;
rt->client_id[off] = '\0';
/* http context is now initialized */
rt->initialized = 1;
return 0;
fail:
rtmp_http_close(h);
return ret;
}
URLProtocol ff_rtmphttp_protocol = {
.name = "rtmphttp",
.url_open = rtmp_http_open,
.url_read = rtmp_http_read,
.url_write = rtmp_http_write,
.url_close = rtmp_http_close,
.priv_data_size = sizeof(RTMP_HTTPContext),
.flags = URL_PROTOCOL_FLAG_NETWORK,
};
@@ -1112,9 +1112,15 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname), &port,
path, sizeof(path), s->filename);
if (port < 0)
port = RTMP_DEFAULT_PORT;
ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
if (!strcmp(proto, "rtmpt")) {
/* open the http tunneling connection */
ff_url_join(buf, sizeof(buf), "rtmphttp", NULL, hostname, port, NULL);
} else {
/* open the tcp connection */
if (port < 0)
port = RTMP_DEFAULT_PORT;
ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
}
if ((ret = ffurl_open(&rt->stream, buf, AVIO_FLAG_READ_WRITE,
&s->interrupt_callback, NULL)) < 0) {
@@ -1425,3 +1431,21 @@ URLProtocol ff_rtmp_protocol = {
.flags = URL_PROTOCOL_FLAG_NETWORK,
.priv_data_class= &rtmp_class,
};
static const AVClass rtmpt_class = {
.class_name = "rtmpt",
.item_name = av_default_item_name,
.option = rtmp_options,
.version = LIBAVUTIL_VERSION_INT,
};
URLProtocol ff_rtmpt_protocol = {
.name = "rtmpt",
.url_open = rtmp_open,
.url_read = rtmp_read,
.url_write = rtmp_write,
.url_close = rtmp_close,
.priv_data_size = sizeof(RTMPContext),
.flags = URL_PROTOCOL_FLAG_NETWORK,
.priv_data_class = &rtmpt_class,
};
@@ -198,11 +198,11 @@ static int rtp_write_header(AVFormatContext *s1)
/* max_header_toc_size + the largest AMR payload must fit */
if (1 + s->max_frames_per_packet + n > s->max_payload_size) {
av_log(s1, AV_LOG_ERROR, "RTP max payload size too small for AMR\n");
return -1;
goto fail;
}
if (st->codec->channels != 1) {
av_log(s1, AV_LOG_ERROR, "Only mono is supported\n");
return -1;
goto fail;
}
case CODEC_ID_AAC:
s->num_frames = 0;
@@ -216,6 +216,10 @@ defaultcase:
}
return 0;
fail:
av_freep(&s->buf);
return AVERROR(EINVAL);
}
/* send an rtcp sender report packet */
......
@@ -66,12 +66,14 @@ typedef struct RTPMuxContext RTPMuxContext;
#define FF_RTP_FLAG_MP4A_LATM 1
#define FF_RTP_FLAG_RFC2190 2
#define FF_RTP_FLAG_SKIP_RTCP 4
#define FF_RTP_FLAG_H264_MODE0 8
#define FF_RTP_FLAG_OPTS(ctx, fieldname) \
{ "rtpflags", "RTP muxer flags", offsetof(ctx, fieldname), AV_OPT_TYPE_FLAGS, {.dbl = 0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
{ "latm", "Use MP4A-LATM packetization instead of MPEG4-GENERIC for AAC", 0, AV_OPT_TYPE_CONST, {.dbl = FF_RTP_FLAG_MP4A_LATM}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
{ "rfc2190", "Use RFC 2190 packetization instead of RFC 4629 for H.263", 0, AV_OPT_TYPE_CONST, {.dbl = FF_RTP_FLAG_RFC2190}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
{ "skip_rtcp", "Don't send RTCP sender reports", 0, AV_OPT_TYPE_CONST, {.dbl = FF_RTP_FLAG_SKIP_RTCP}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
{ "h264_mode0", "Use mode 0 for H264 in RTP", 0, AV_OPT_TYPE_CONST, {.dbl = FF_RTP_FLAG_H264_MODE0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
void ff_rtp_send_data(AVFormatContext *s1, const uint8_t *buf1, int len, int m);
......
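Because FF_RTP_FLAG_OPTS exposes the new constant through the rtpflags muxer option, mode 0 can be requested from the command line; a sketch of the invocation (input file and target address are placeholders):

    ffmpeg -i input.mp4 -an -c:v libx264 -f rtp -rtpflags h264_mode0 rtp://127.0.0.1:1234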
@@ -55,6 +55,12 @@ static void nal_send(AVFormatContext *s1, const uint8_t *buf, int size, int last
uint8_t type = buf[0] & 0x1F;
uint8_t nri = buf[0] & 0x60;
if (s->flags & FF_RTP_FLAG_H264_MODE0) {
av_log(s1, AV_LOG_ERROR,
"NAL size %d > %d, try -slice-max-size %d\n", size,
s->max_payload_size, s->max_payload_size);
return;
}
av_log(s1, AV_LOG_DEBUG, "NAL size %d > %d\n", size, s->max_payload_size);
s->buf[0] = 28; /* FU Indicator; Type = 28 ---> FU-A */
s->buf[0] |= nri;
......
@@ -388,15 +388,20 @@ static char *sdp_write_media_attributes(char *buff, int size, AVCodecContext *c,
char *config = NULL;
switch (c->codec_id) {
case CODEC_ID_H264:
case CODEC_ID_H264: {
int mode = 1;
if (fmt && fmt->oformat->priv_class &&
av_opt_flag_is_set(fmt->priv_data, "rtpflags", "h264_mode0"))
mode = 0;
if (c->extradata_size) {
config = extradata2psets(c);
}
av_strlcatf(buff, size, "a=rtpmap:%d H264/90000\r\n"
"a=fmtp:%d packetization-mode=1%s\r\n",
"a=fmtp:%d packetization-mode=%d%s\r\n",
payload_type,
payload_type, config ? config : "");
payload_type, mode, config ? config : "");
break;
}
case CODEC_ID_H263:
case CODEC_ID_H263P:
/* a=framesize is required by 3GPP TS 26.234 (PSS). It
......
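With the flag set, the template above announces packetization mode 0 in the SDP instead of the previously hard-coded mode 1; for an example payload type of 96 the emitted lines become:

    a=rtpmap:96 H264/90000
    a=fmtp:96 packetization-mode=0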
@@ -30,7 +30,7 @@
#include "libavutil/avutil.h"
#define LIBAVFORMAT_VERSION_MAJOR 54
#define LIBAVFORMAT_VERSION_MINOR 7
#define LIBAVFORMAT_VERSION_MINOR 8
#define LIBAVFORMAT_VERSION_MICRO 100
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
......
@@ -320,7 +320,8 @@ int ff_audio_mix_init(AVAudioResampleContext *avr)
avr->center_mix_level,
avr->surround_mix_level,
avr->lfe_mix_level, 1, matrix_dbl,
avr->in_channels);
avr->in_channels,
avr->matrix_encoding);
if (ret < 0) {
av_free(matrix_dbl);
return ret;
......
@@ -54,6 +54,8 @@
#define SURROUND_DIRECT_LEFT 33
#define SURROUND_DIRECT_RIGHT 34
#define SQRT3_2 1.22474487139158904909 /* sqrt(3/2) */
static av_always_inline int even(uint64_t layout)
{
return (!layout || (layout & (layout - 1)));
@@ -83,14 +85,21 @@ static int sane_layout(uint64_t layout)
int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout,
double center_mix_level, double surround_mix_level,
double lfe_mix_level, int normalize,
double *matrix_out, int stride)
double *matrix_out, int stride,
enum AVMatrixEncoding matrix_encoding)
{
int i, j, out_i, out_j;
double matrix[64][64] = {{0}};
int64_t unaccounted = in_layout & ~out_layout;
int64_t unaccounted;
double maxcoef = 0;
int in_channels, out_channels;
if ((out_layout & AV_CH_LAYOUT_STEREO_DOWNMIX) == AV_CH_LAYOUT_STEREO_DOWNMIX) {
out_layout = AV_CH_LAYOUT_STEREO;
}
unaccounted = in_layout & ~out_layout;
in_channels = av_get_channel_layout_nb_channels( in_layout);
out_channels = av_get_channel_layout_nb_channels(out_layout);
@@ -140,8 +149,19 @@ int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout,
matrix[SIDE_LEFT ][BACK_CENTER] += M_SQRT1_2;
matrix[SIDE_RIGHT][BACK_CENTER] += M_SQRT1_2;
} else if (out_layout & AV_CH_FRONT_LEFT) {
matrix[FRONT_LEFT ][BACK_CENTER] += surround_mix_level * M_SQRT1_2;
matrix[FRONT_RIGHT][BACK_CENTER] += surround_mix_level * M_SQRT1_2;
if (matrix_encoding == AV_MATRIX_ENCODING_DOLBY ||
matrix_encoding == AV_MATRIX_ENCODING_DPLII) {
if (unaccounted & (AV_CH_BACK_LEFT | AV_CH_SIDE_LEFT)) {
matrix[FRONT_LEFT ][BACK_CENTER] -= surround_mix_level * M_SQRT1_2;
matrix[FRONT_RIGHT][BACK_CENTER] += surround_mix_level * M_SQRT1_2;
} else {
matrix[FRONT_LEFT ][BACK_CENTER] -= surround_mix_level;
matrix[FRONT_RIGHT][BACK_CENTER] += surround_mix_level;
}
} else {
matrix[FRONT_LEFT ][BACK_CENTER] += surround_mix_level * M_SQRT1_2;
matrix[FRONT_RIGHT][BACK_CENTER] += surround_mix_level * M_SQRT1_2;
}
} else if (out_layout & AV_CH_FRONT_CENTER) {
matrix[FRONT_CENTER][BACK_CENTER] += surround_mix_level * M_SQRT1_2;
} else
@@ -163,8 +183,20 @@ int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout,
matrix[SIDE_RIGHT][BACK_RIGHT] += 1.0;
}
} else if (out_layout & AV_CH_FRONT_LEFT) {
matrix[FRONT_LEFT ][BACK_LEFT ] += surround_mix_level;
matrix[FRONT_RIGHT][BACK_RIGHT] += surround_mix_level;
if (matrix_encoding == AV_MATRIX_ENCODING_DOLBY) {
matrix[FRONT_LEFT ][BACK_LEFT ] -= surround_mix_level * M_SQRT1_2;
matrix[FRONT_LEFT ][BACK_RIGHT] -= surround_mix_level * M_SQRT1_2;
matrix[FRONT_RIGHT][BACK_LEFT ] += surround_mix_level * M_SQRT1_2;
matrix[FRONT_RIGHT][BACK_RIGHT] += surround_mix_level * M_SQRT1_2;
} else if (matrix_encoding == AV_MATRIX_ENCODING_DPLII) {
matrix[FRONT_LEFT ][BACK_LEFT ] -= surround_mix_level * SQRT3_2;
matrix[FRONT_LEFT ][BACK_RIGHT] -= surround_mix_level * M_SQRT1_2;
matrix[FRONT_RIGHT][BACK_LEFT ] += surround_mix_level * M_SQRT1_2;
matrix[FRONT_RIGHT][BACK_RIGHT] += surround_mix_level * SQRT3_2;
} else {
matrix[FRONT_LEFT ][BACK_LEFT ] += surround_mix_level;
matrix[FRONT_RIGHT][BACK_RIGHT] += surround_mix_level;
}
} else if (out_layout & AV_CH_FRONT_CENTER) {
matrix[FRONT_CENTER][BACK_LEFT ] += surround_mix_level * M_SQRT1_2;
matrix[FRONT_CENTER][BACK_RIGHT] += surround_mix_level * M_SQRT1_2;
@@ -187,8 +219,20 @@ int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout,
matrix[BACK_CENTER][SIDE_LEFT ] += M_SQRT1_2;
matrix[BACK_CENTER][SIDE_RIGHT] += M_SQRT1_2;
} else if (out_layout & AV_CH_FRONT_LEFT) {
matrix[FRONT_LEFT ][SIDE_LEFT ] += surround_mix_level;
matrix[FRONT_RIGHT][SIDE_RIGHT] += surround_mix_level;
if (matrix_encoding == AV_MATRIX_ENCODING_DOLBY) {
matrix[FRONT_LEFT ][SIDE_LEFT ] -= surround_mix_level * M_SQRT1_2;
matrix[FRONT_LEFT ][SIDE_RIGHT] -= surround_mix_level * M_SQRT1_2;
matrix[FRONT_RIGHT][SIDE_LEFT ] += surround_mix_level * M_SQRT1_2;
matrix[FRONT_RIGHT][SIDE_RIGHT] += surround_mix_level * M_SQRT1_2;
} else if (matrix_encoding == AV_MATRIX_ENCODING_DPLII) {
matrix[FRONT_LEFT ][SIDE_LEFT ] -= surround_mix_level * SQRT3_2;
matrix[FRONT_LEFT ][SIDE_RIGHT] -= surround_mix_level * M_SQRT1_2;
matrix[FRONT_RIGHT][SIDE_LEFT ] += surround_mix_level * M_SQRT1_2;
matrix[FRONT_RIGHT][SIDE_RIGHT] += surround_mix_level * SQRT3_2;
} else {
matrix[FRONT_LEFT ][SIDE_LEFT ] += surround_mix_level;
matrix[FRONT_RIGHT][SIDE_RIGHT] += surround_mix_level;
}
} else if (out_layout & AV_CH_FRONT_CENTER) {
matrix[FRONT_CENTER][SIDE_LEFT ] += surround_mix_level * M_SQRT1_2;
matrix[FRONT_CENTER][SIDE_RIGHT] += surround_mix_level * M_SQRT1_2;
......
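Reading the new branches back out as equations (s is the surround mix level; the center and LFE contributions handled elsewhere in the function are omitted), the Dolby Surround case folds the rear pair into out-of-phase components:

    Lt = L - s/sqrt(2) * (Ls + Rs)
    Rt = R + s/sqrt(2) * (Ls + Rs)

while Pro Logic II weights the same-side channel by sqrt(3/2), matching the SQRT3_2 constant defined above:

    Lt = L - s*sqrt(3/2)*Ls - s/sqrt(2)*Rs
    Rt = R + s/sqrt(2)*Ls + s*sqrt(3/2)*Rs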
@@ -131,12 +131,13 @@ void avresample_free(AVAudioResampleContext **avr);
* the weight of input channel i in output channel o.
* @param stride distance between adjacent input channels in the
* matrix array
* @param matrix_encoding matrixed stereo downmix mode (e.g. dplii)
* @return 0 on success, negative AVERROR code on failure
*/
int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout,
double center_mix_level, double surround_mix_level,
double lfe_mix_level, int normalize, double *matrix,
int stride);
int stride, enum AVMatrixEncoding matrix_encoding);
/**
* Get the current channel mixing matrix.
......
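A sketch of the extended call building a 5.1-to-stereo Pro Logic II matrix (function name hypothetical; the mix levels are common defaults, not values mandated by the API, and the stride follows the in-channel count as in ff_audio_mix_init above):

    #include "libavutil/audioconvert.h"
    #include "libavutil/mathematics.h"
    #include "libavresample/avresample.h"

    int build_dplii_matrix(double *matrix /* at least 2 * 6 entries */)
    {
        /* 6 input channels per output row, so matrix[o * 6 + i] is the
         * weight of input channel i in output channel o */
        return avresample_build_matrix(AV_CH_LAYOUT_5POINT1, AV_CH_LAYOUT_STEREO,
                                       M_SQRT1_2,  /* center mix level   */
                                       M_SQRT1_2,  /* surround mix level */
                                       0.0,        /* lfe mix level      */
                                       1,          /* normalize          */
                                       matrix, 6,
                                       AV_MATRIX_ENCODING_DPLII);
    }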
@@ -70,6 +70,7 @@ struct AVAudioResampleContext {
AudioConvert *ac_out; /**< output sample format conversion context */
ResampleContext *resample; /**< resampling context */
AudioMix *am; /**< channel mixing context */
enum AVMatrixEncoding matrix_encoding; /**< matrixed stereo encoding */
};
#endif /* AVRESAMPLE_INTERNAL_H */
@@ -52,6 +52,10 @@ static const AVOption options[] = {
{ "phase_shift", "Resampling Phase Shift", OFFSET(phase_shift), AV_OPT_TYPE_INT, { 10 }, 0, 30, /* ??? */ PARAM },
{ "linear_interp", "Use Linear Interpolation", OFFSET(linear_interp), AV_OPT_TYPE_INT, { 0 }, 0, 1, PARAM },
{ "cutoff", "Cutoff Frequency Ratio", OFFSET(cutoff), AV_OPT_TYPE_DOUBLE, { 0.8 }, 0.0, 1.0, PARAM },
{ "matrix_encoding", "Matrixed Stereo Encoding", OFFSET(matrix_encoding), AV_OPT_TYPE_INT, { AV_MATRIX_ENCODING_NONE}, AV_MATRIX_ENCODING_NONE, AV_MATRIX_ENCODING_NB-1, PARAM, "matrix_encoding" },
{ "none", "None", 0, AV_OPT_TYPE_CONST, { AV_MATRIX_ENCODING_NONE }, INT_MIN, INT_MAX, PARAM, "matrix_encoding" },
{ "dolby", "Dolby", 0, AV_OPT_TYPE_CONST, { AV_MATRIX_ENCODING_DOLBY }, INT_MIN, INT_MAX, PARAM, "matrix_encoding" },
{ "dplii", "Dolby Pro Logic II", 0, AV_OPT_TYPE_CONST, { AV_MATRIX_ENCODING_DPLII }, INT_MIN, INT_MAX, PARAM, "matrix_encoding" },
{ NULL },
};
......
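The named constants make the mode settable on an allocated context through the generic option API; a minimal sketch (error checking elided):

    AVAudioResampleContext *avr = avresample_alloc_context();
    av_opt_set(avr, "matrix_encoding", "dplii", 0);

The integer form av_opt_set_int(avr, "matrix_encoding", AV_MATRIX_ENCODING_DPLII, 0) is equivalent.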
@@ -21,7 +21,7 @@
#define LIBAVRESAMPLE_VERSION_MAJOR 0
#define LIBAVRESAMPLE_VERSION_MINOR 0
#define LIBAVRESAMPLE_VERSION_MICRO 2
#define LIBAVRESAMPLE_VERSION_MICRO 3
#define LIBAVRESAMPLE_VERSION_INT AV_VERSION_INT(LIBAVRESAMPLE_VERSION_MAJOR, \
LIBAVRESAMPLE_VERSION_MINOR, \
......
@@ -150,3 +150,84 @@ cglobal mix_2_to_1_s16p_q8, 3,4,6, src, matrix, len, src1
sub lend, mmsize/2
jg .loop
REP_RET
;-----------------------------------------------------------------------------
; void ff_mix_1_to_2_fltp_flt(float **src, float **matrix, int len,
; int out_ch, int in_ch);
;-----------------------------------------------------------------------------
%macro MIX_1_TO_2_FLTP_FLT 0
cglobal mix_1_to_2_fltp_flt, 3,5,4, src0, matrix0, len, src1, matrix1
mov src1q, [src0q+gprsize]
mov src0q, [src0q]
sub src1q, src0q
mov matrix1q, [matrix0q+gprsize]
mov matrix0q, [matrix0q]
VBROADCASTSS m2, [matrix0q]
VBROADCASTSS m3, [matrix1q]
ALIGN 16
.loop:
mova m0, [src0q]
mulps m1, m0, m3
mulps m0, m0, m2
mova [src0q ], m0
mova [src0q+src1q], m1
add src0q, mmsize
sub lend, mmsize/4
jg .loop
REP_RET
%endmacro
INIT_XMM sse
MIX_1_TO_2_FLTP_FLT
%if HAVE_AVX
INIT_YMM avx
MIX_1_TO_2_FLTP_FLT
%endif
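For reference, a scalar C equivalent of what MIX_1_TO_2_FLTP_FLT vectorizes: two independent scalings of the mono input, the second written through the src[1]-src[0] displacement kept in src1q above (function name hypothetical):

    static void mix_1_to_2_fltp_flt_c(float **src, float **matrix, int len)
    {
        const float m0 = matrix[0][0]; /* coefficient for output channel 0 */
        const float m1 = matrix[1][0]; /* coefficient for output channel 1 */
        for (int i = 0; i < len; i++) {
            float v   = src[0][i];
            src[1][i] = v * m1;
            src[0][i] = v * m0; /* overwrite the shared input last */
        }
    }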
;-----------------------------------------------------------------------------
; void ff_mix_1_to_2_s16p_flt(int16_t **src, float **matrix, int len,
; int out_ch, int in_ch);
;-----------------------------------------------------------------------------
%macro MIX_1_TO_2_S16P_FLT 0
cglobal mix_1_to_2_s16p_flt, 3,5,6, src0, matrix0, len, src1, matrix1
mov src1q, [src0q+gprsize]
mov src0q, [src0q]
sub src1q, src0q
mov matrix1q, [matrix0q+gprsize]
mov matrix0q, [matrix0q]
VBROADCASTSS m4, [matrix0q]
VBROADCASTSS m5, [matrix1q]
ALIGN 16
.loop:
mova m0, [src0q]
S16_TO_S32_SX 0, 2
cvtdq2ps m0, m0
cvtdq2ps m2, m2
mulps m1, m0, m5
mulps m0, m0, m4
mulps m3, m2, m5
mulps m2, m2, m4
cvtps2dq m0, m0
cvtps2dq m1, m1
cvtps2dq m2, m2
cvtps2dq m3, m3
packssdw m0, m2
packssdw m1, m3
mova [src0q ], m0
mova [src0q+src1q], m1
add src0q, mmsize
sub lend, mmsize/2
jg .loop
REP_RET
%endmacro
INIT_XMM sse2
MIX_1_TO_2_S16P_FLT
INIT_XMM sse4
MIX_1_TO_2_S16P_FLT
%if HAVE_AVX
INIT_XMM avx
MIX_1_TO_2_S16P_FLT
%endif
@@ -35,6 +35,18 @@ extern void ff_mix_2_to_1_s16p_flt_sse4(int16_t **src, float **matrix, int len,
extern void ff_mix_2_to_1_s16p_q8_sse2(int16_t **src, int16_t **matrix,
int len, int out_ch, int in_ch);
extern void ff_mix_1_to_2_fltp_flt_sse(float **src, float **matrix, int len,
int out_ch, int in_ch);
extern void ff_mix_1_to_2_fltp_flt_avx(float **src, float **matrix, int len,
int out_ch, int in_ch);
extern void ff_mix_1_to_2_s16p_flt_sse2(int16_t **src, float **matrix, int len,
int out_ch, int in_ch);
extern void ff_mix_1_to_2_s16p_flt_sse4(int16_t **src, float **matrix, int len,
int out_ch, int in_ch);
extern void ff_mix_1_to_2_s16p_flt_avx (int16_t **src, float **matrix, int len,
int out_ch, int in_ch);
av_cold void ff_audio_mix_init_x86(AudioMix *am)
{
#if HAVE_YASM
@@ -43,20 +55,30 @@ av_cold void ff_audio_mix_init_x86(AudioMix *am)
if (mm_flags & AV_CPU_FLAG_SSE && HAVE_SSE) {
ff_audio_mix_set_func(am, AV_SAMPLE_FMT_FLTP, AV_MIX_COEFF_TYPE_FLT,
2, 1, 16, 8, "SSE", ff_mix_2_to_1_fltp_flt_sse);
ff_audio_mix_set_func(am, AV_SAMPLE_FMT_FLTP, AV_MIX_COEFF_TYPE_FLT,
1, 2, 16, 4, "SSE", ff_mix_1_to_2_fltp_flt_sse);
}
if (mm_flags & AV_CPU_FLAG_SSE2 && HAVE_SSE) {
ff_audio_mix_set_func(am, AV_SAMPLE_FMT_S16P, AV_MIX_COEFF_TYPE_FLT,
2, 1, 16, 8, "SSE2", ff_mix_2_to_1_s16p_flt_sse2);
ff_audio_mix_set_func(am, AV_SAMPLE_FMT_S16P, AV_MIX_COEFF_TYPE_Q8,
2, 1, 16, 8, "SSE2", ff_mix_2_to_1_s16p_q8_sse2);
ff_audio_mix_set_func(am, AV_SAMPLE_FMT_S16P, AV_MIX_COEFF_TYPE_FLT,
1, 2, 16, 8, "SSE2", ff_mix_1_to_2_s16p_flt_sse2);
}
if (mm_flags & AV_CPU_FLAG_SSE4 && HAVE_SSE) {
ff_audio_mix_set_func(am, AV_SAMPLE_FMT_S16P, AV_MIX_COEFF_TYPE_FLT,
2, 1, 16, 8, "SSE4", ff_mix_2_to_1_s16p_flt_sse4);
ff_audio_mix_set_func(am, AV_SAMPLE_FMT_S16P, AV_MIX_COEFF_TYPE_FLT,
1, 2, 16, 8, "SSE4", ff_mix_1_to_2_s16p_flt_sse4);
}
if (mm_flags & AV_CPU_FLAG_AVX && HAVE_AVX) {
ff_audio_mix_set_func(am, AV_SAMPLE_FMT_FLTP, AV_MIX_COEFF_TYPE_FLT,
2, 1, 32, 16, "AVX", ff_mix_2_to_1_fltp_flt_avx);
ff_audio_mix_set_func(am, AV_SAMPLE_FMT_FLTP, AV_MIX_COEFF_TYPE_FLT,
1, 2, 32, 8, "AVX", ff_mix_1_to_2_fltp_flt_avx);
ff_audio_mix_set_func(am, AV_SAMPLE_FMT_S16P, AV_MIX_COEFF_TYPE_FLT,
1, 2, 16, 8, "AVX", ff_mix_1_to_2_s16p_flt_avx);
}
#endif
}
@@ -101,6 +101,13 @@
#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)
enum AVMatrixEncoding {
AV_MATRIX_ENCODING_NONE,
AV_MATRIX_ENCODING_DOLBY,
AV_MATRIX_ENCODING_DPLII,
AV_MATRIX_ENCODING_NB
};
/**
* @}
*/
......
@@ -76,6 +76,10 @@ FATE_VORBIS += fate-vorbis-19
fate-vorbis-19: CMD = pcm -i $(SAMPLES)/vorbis/test-short2_small.ogg
fate-vorbis-19: REF = $(SAMPLES)/vorbis/test-short2_small.pcm
FATE_VORBIS += fate-vorbis-20
fate-vorbis-20: CMD = pcm -i $(SAMPLES)/vorbis/6.ogg
fate-vorbis-20: REF = $(SAMPLES)/vorbis/6.pcm
FATE_SAMPLES_AVCONV += $(FATE_VORBIS)
fate-vorbis: $(FATE_VORBIS)
$(FATE_VORBIS): CMP = oneoff