Commit 12327237 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  aacenc: Fix issues with huge values of bit_rate.
  dv_tablegen: Drop unnecessary av_unused attribute from dv_vlc_map_tableinit().
  proresenc: multithreaded quantiser search
  riff: use bps instead of bits_per_coded_sample in the WAVEFORMATEXTENSIBLE header
  avconv: only set the "channels" option when it exists for the specified input format
  avplay: update get_buffer to be inline with avconv
  aacdec: More robust output configuration.
  faac: Fix multi-channel ordering
  faac: Add .channel_layouts
  rtmp: Support 'rtmp_playpath', an option which overrides the stream identifier
  rtmp: Support 'rtmp_app', an option which overrides the name of application
  avutil: add better documentation for AVSampleFormat

Conflicts:
	libavcodec/aac.h
	libavcodec/aacdec.c
	libavcodec/aacenc.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents a6667526 0f96f0d9
@@ -4189,8 +4189,16 @@ static int opt_input_file(OptionsContext *o, const char *opt, const char *filena
         av_dict_set(&format_opts, "sample_rate", buf, 0);
     }
     if (o->nb_audio_channels) {
-        snprintf(buf, sizeof(buf), "%d", o->audio_channels[o->nb_audio_channels - 1].u.i);
-        av_dict_set(&format_opts, "channels", buf, 0);
+        /* because we set audio_channels based on both the "ac" and
+         * "channel_layout" options, we need to check that the specified
+         * demuxer actually has the "channels" option before setting it */
+        if (file_iformat && file_iformat->priv_class &&
+            av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
+                        AV_OPT_SEARCH_FAKE_OBJ)) {
+            snprintf(buf, sizeof(buf), "%d",
+                     o->audio_channels[o->nb_audio_channels - 1].u.i);
+            av_dict_set(&format_opts, "channels", buf, 0);
+        }
     }
     if (o->nb_frame_rates) {
         av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
......
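The comment in the hunk above spells out the pattern: because "channels" is queued as a demuxer private option, the code first checks that the selected input format actually declares such an option. A minimal sketch of that probe, using only the libavformat/libavutil calls already present in the hunk; the helper name and its caller are hypothetical:

#include <stdio.h>
#include "libavformat/avformat.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"

/* Hypothetical helper: queue a "channels" option only if the demuxer's
 * private class actually exposes one. */
static void set_channels_if_supported(AVInputFormat *fmt, AVDictionary **opts,
                                      int channels)
{
    char buf[16];

    /* AV_OPT_SEARCH_FAKE_OBJ searches the option table through the AVClass
     * pointer alone, so no demuxer context needs to be allocated. */
    if (fmt && fmt->priv_class &&
        av_opt_find(&fmt->priv_class, "channels", NULL, 0,
                    AV_OPT_SEARCH_FAKE_OBJ)) {
        snprintf(buf, sizeof(buf), "%d", channels);
        av_dict_set(opts, "channels", buf, 0);
    }
}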
@@ -1600,6 +1600,10 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
     pic->opaque           = ref;
     pic->type             = FF_BUFFER_TYPE_USER;
     pic->reordered_opaque = codec->reordered_opaque;
+    pic->width               = codec->width;
+    pic->height              = codec->height;
+    pic->format              = codec->pix_fmt;
+    pic->sample_aspect_ratio = codec->sample_aspect_ratio;
     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
     else            pic->pkt_pts = AV_NOPTS_VALUE;
     return 0;
......
@@ -112,6 +112,15 @@ enum OCStatus {
     OC_LOCKED,                ///< Output configuration locked in place
 };
 
+typedef struct {
+    MPEG4AudioConfig m4ac;
+    uint8_t layout_map[MAX_ELEM_ID*4][3];
+    int layout_map_tags;
+    int channels;
+    uint64_t channel_layout;
+    enum OCStatus status;
+} OutputConfiguration;
+
 /**
  * Predictor State
  */
@@ -254,8 +263,6 @@ typedef struct {
     AVCodecContext *avctx;
     AVFrame frame;
 
-    MPEG4AudioConfig m4ac;
-
     int is_saved;                 ///< Set if elements have stored overlap from previous frame.
     DynamicRangeControl che_drc;
 
@@ -263,8 +270,6 @@ typedef struct {
      * @name Channel element related data
      * @{
      */
-    uint8_t layout_map[MAX_ELEM_ID*4][3];
-    int layout_map_tags;
     ChannelElement          *che[4][MAX_ELEM_ID];
     ChannelElement  *tag_che_map[4][MAX_ELEM_ID];
     int tags_mapped;
@@ -299,7 +304,7 @@ typedef struct {
     DECLARE_ALIGNED(32, float, temp)[128];
 
-    enum OCStatus output_configured;
+    OutputConfiguration oc[2];
     int warned_num_aac_frames;
 } AACContext;
......
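The aacdec.c changes that actually use the new two-element oc[] array are collapsed in this view, so the following is only a hedged sketch of the rollback pattern such a pair of configurations enables: keep the last known-good configuration in one slot so a failed reconfiguration can be undone. The struct fields and helper names are illustrative stand-ins, not the decoder's real code.

#include <stdint.h>

/* Illustrative stand-in for the real OutputConfiguration above. */
typedef struct {
    int      channels;
    uint64_t channel_layout;
} Config;

typedef struct {
    Config oc[2];   /* oc[1]: active config, oc[0]: backup of the last good one */
} DecoderState;

/* Save the current configuration before attempting to apply a new one. */
static void push_config(DecoderState *s)
{
    s->oc[0] = s->oc[1];
}

/* Restore the saved configuration when applying the new one failed. */
static void pop_config(DecoderState *s)
{
    s->oc[1] = s->oc[0];
}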
@@ -571,8 +571,10 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
         }
         start_ch += chans;
     }
-    if ((ret = ff_alloc_packet2(avctx, avpkt, 768 * s->channels)))
+    if ((ret = ff_alloc_packet2(avctx, avpkt, 768 * s->channels))) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
         return ret;
+    }
     do {
         int frame_bits;
......
@@ -918,7 +918,7 @@ static void read_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
 {
     switch (bs_extension_id) {
     case EXTENSION_ID_PS:
-        if (!ac->m4ac.ps) {
+        if (!ac->oc[1].m4ac.ps) {
             av_log(ac->avctx, AV_LOG_ERROR, "Parametric Stereo signaled to be not-present but was found in the bitstream.\n");
             skip_bits_long(gb, *num_bits_left); // bs_fill_bits
             *num_bits_left = 0;
@@ -1077,9 +1077,9 @@ int ff_decode_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
         sbr->reset = 0;
 
     if (!sbr->sample_rate)
-        sbr->sample_rate = 2 * ac->m4ac.sample_rate; //TODO use the nominal sample rate for arbitrary sample rate support
-    if (!ac->m4ac.ext_sample_rate)
-        ac->m4ac.ext_sample_rate = 2 * ac->m4ac.sample_rate;
+        sbr->sample_rate = 2 * ac->oc[1].m4ac.sample_rate; //TODO use the nominal sample rate for arbitrary sample rate support
+    if (!ac->oc[1].m4ac.ext_sample_rate)
+        ac->oc[1].m4ac.ext_sample_rate = 2 * ac->oc[1].m4ac.sample_rate;
     if (crc) {
         skip_bits(gb, 10); // bs_sbr_crc_bits; TODO - implement CRC check
@@ -1654,7 +1654,7 @@ static void sbr_hf_assemble(float Y1[38][64][2],
 void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
                   float* L, float* R)
 {
-    int downsampled = ac->m4ac.ext_sample_rate < sbr->sample_rate;
+    int downsampled = ac->oc[1].m4ac.ext_sample_rate < sbr->sample_rate;
     int ch;
     int nch = (id_aac == TYPE_CPE) ? 2 : 1;
     int err;
@@ -1701,7 +1701,7 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
                             sbr->X_low, ch);
     }
 
-    if (ac->m4ac.ps == 1) {
+    if (ac->oc[1].m4ac.ps == 1) {
         if (sbr->ps.start) {
             ff_ps_apply(ac->avctx, &sbr->ps, sbr->X[0], sbr->X[1], sbr->kx[1] + sbr->m[1]);
         } else {
......
@@ -25,7 +25,6 @@
 
 #include <stdint.h>
 
-#include "libavutil/attributes.h"
 #include "dv_vlc_data.h"
 
 #if CONFIG_SMALL
@@ -48,7 +47,7 @@ typedef struct dv_vlc_pair {
 #else
 static struct dv_vlc_pair dv_vlc_map[DV_VLC_MAP_RUN_SIZE][DV_VLC_MAP_LEV_SIZE];
 
-static void av_unused dv_vlc_map_tableinit(void)
+static void dv_vlc_map_tableinit(void)
 {
     int i, j;
     for (i = 0; i < NB_DV_VLC - 1; i++) {
......
@@ -29,6 +29,7 @@
 #include "avcodec.h"
 #include "audio_frame_queue.h"
 #include "internal.h"
+#include "libavutil/audioconvert.h"
 
 /* libfaac has an encoder delay of 1024 samples */
@@ -39,13 +40,6 @@ typedef struct FaacAudioContext {
     AudioFrameQueue afq;
 } FaacAudioContext;
 
-static const int channel_maps[][6] = {
-    { 2, 0, 1 },          //< C L R
-    { 2, 0, 1, 3 },       //< C L R Cs
-    { 2, 0, 1, 3, 4 },    //< C L R Ls Rs
-    { 2, 0, 1, 4, 5, 3 }, //< C L R Ls Rs LFE
-};
-
 static av_cold int Faac_encode_close(AVCodecContext *avctx)
 {
     FaacAudioContext *s = avctx->priv_data;
@@ -62,6 +56,13 @@ static av_cold int Faac_encode_close(AVCodecContext *avctx)
     return 0;
 }
 
+static const int channel_maps[][6] = {
+    { 2, 0, 1 },          //< C L R
+    { 2, 0, 1, 3 },       //< C L R Cs
+    { 2, 0, 1, 3, 4 },    //< C L R Ls Rs
+    { 2, 0, 1, 4, 5, 3 }, //< C L R Ls Rs LFE
+};
+
 static av_cold int Faac_encode_init(AVCodecContext *avctx)
 {
     FaacAudioContext *s = avctx->priv_data;
@@ -184,8 +185,10 @@ static int Faac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
     int num_samples = frame ? frame->nb_samples : 0;
     void *samples   = frame ? frame->data[0] : NULL;
 
-    if ((ret = ff_alloc_packet2(avctx, avpkt, (7 + 768) * avctx->channels)))
+    if ((ret = ff_alloc_packet2(avctx, avpkt, (7 + 768) * avctx->channels))) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
         return ret;
+    }
 
     bytes_written = faacEncEncode(s->faac_handle, samples,
                                   num_samples * avctx->channels,
@@ -221,6 +224,16 @@ static const AVProfile profiles[] = {
     { FF_PROFILE_UNKNOWN },
 };
 
+static const uint64_t faac_channel_layouts[] = {
+    AV_CH_LAYOUT_MONO,
+    AV_CH_LAYOUT_STEREO,
+    AV_CH_LAYOUT_SURROUND,
+    AV_CH_LAYOUT_4POINT0,
+    AV_CH_LAYOUT_5POINT0_BACK,
+    AV_CH_LAYOUT_5POINT1_BACK,
+    0
+};
+
 AVCodec ff_libfaac_encoder = {
     .name           = "libfaac",
     .type           = AVMEDIA_TYPE_AUDIO,
@@ -234,4 +247,5 @@ AVCodec ff_libfaac_encoder = {
                                                      AV_SAMPLE_FMT_NONE },
     .long_name      = NULL_IF_CONFIG_SMALL("libfaac AAC (Advanced Audio Codec)"),
     .profiles       = NULL_IF_CONFIG_SMALL(profiles),
+    .channel_layouts = faac_channel_layouts,
 };
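The channel_maps table in the hunk above encodes, for each destination slot in the libfaac/AAC channel order named in its comment (e.g. C L R Ls Rs LFE for 5.1), the index of the source channel in the native order (L R C LFE Ls Rs). A hedged sketch of how such a table can be applied to interleaved 16-bit samples; this only illustrates how the table is read, not the exact code libfaac.c uses, which is not shown in this excerpt:

#include <stdint.h>

/* Reorder interleaved int16 samples: for every frame, destination slot j is
 * filled from source channel map[j]. */
static void remap_interleaved_s16(int16_t *dst, const int16_t *src,
                                  int nb_samples, int channels, const int *map)
{
    int i, j;
    for (i = 0; i < nb_samples; i++)
        for (j = 0; j < channels; j++)
            dst[i * channels + j] = src[i * channels + map[j]];
}

With map = { 2, 0, 1, 4, 5, 3 } and channels = 6 this turns L R C LFE Ls Rs frames into C L R Ls Rs LFE frames, matching the last entry of channel_maps.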
@@ -28,6 +28,7 @@
 #include "libavutil/avstring.h"
 #include "libavutil/intfloat.h"
 #include "libavutil/lfg.h"
+#include "libavutil/opt.h"
 #include "libavutil/sha.h"
 #include "avformat.h"
 #include "internal.h"
@@ -41,6 +42,9 @@
 
 //#define DEBUG
 
+#define APP_MAX_LENGTH 128
+#define PLAYPATH_MAX_LENGTH 256
+
 /** RTMP protocol handler state */
 typedef enum {
     STATE_START,      ///< client has not done anything yet
@@ -56,12 +60,13 @@ typedef enum {
 
 /** protocol handler context */
 typedef struct RTMPContext {
+    const AVClass *class;
     URLContext*   stream;                     ///< TCP stream used in interactions with RTMP server
     RTMPPacket    prev_pkt[2][RTMP_CHANNELS]; ///< packet history used when reading and sending packets
     int           chunk_size;                 ///< size of the chunks RTMP packets are divided into
     int           is_input;                   ///< input/output flag
-    char          playpath[256];              ///< path to filename to play (with possible "mp4:" prefix)
-    char          app[128];                   ///< application
+    char          *playpath;                  ///< stream identifier to play (with possible "mp4:" prefix)
+    char          *app;                       ///< name of application
     ClientState   state;                      ///< current state
     int           main_channel_id;            ///< an additional channel ID which is used for some invocations
     uint8_t*      flv_data;                   ///< buffer with data for demuxer
@@ -822,6 +827,7 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
 {
     RTMPContext *rt = s->priv_data;
     char proto[8], hostname[256], path[1024], *fname;
+    char *old_app;
     uint8_t buf[2048];
     int port;
     int ret;
@@ -847,6 +853,16 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
     rt->chunk_size = 128;
     rt->state = STATE_HANDSHAKED;
 
+    // Keep the application name when it has been defined by the user.
+    old_app = rt->app;
+
+    rt->app = av_malloc(APP_MAX_LENGTH);
+    if (!rt->app) {
+        rtmp_close(s);
+        return AVERROR(ENOMEM);
+    }
+
     //extract "app" part from path
     if (!strncmp(path, "/ondemand/", 10)) {
         fname = path + 10;
@@ -868,14 +884,29 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
             }
         }
     }
-    if (!strchr(fname, ':') &&
-        (!strcmp(fname + strlen(fname) - 4, ".f4v") ||
-         !strcmp(fname + strlen(fname) - 4, ".mp4"))) {
-        memcpy(rt->playpath, "mp4:", 5);
-    } else {
-        rt->playpath[0] = 0;
+
+    if (old_app) {
+        // The name of application has been defined by the user, override it.
+        av_free(rt->app);
+        rt->app = old_app;
+    }
+
+    if (!rt->playpath) {
+        rt->playpath = av_malloc(PLAYPATH_MAX_LENGTH);
+        if (!rt->playpath) {
+            rtmp_close(s);
+            return AVERROR(ENOMEM);
+        }
+
+        if (!strchr(fname, ':') &&
+            (!strcmp(fname + strlen(fname) - 4, ".f4v") ||
+             !strcmp(fname + strlen(fname) - 4, ".mp4"))) {
+            memcpy(rt->playpath, "mp4:", 5);
+        } else {
+            rt->playpath[0] = 0;
+        }
+        strncat(rt->playpath, fname, PLAYPATH_MAX_LENGTH - 5);
     }
-    strncat(rt->playpath, fname, sizeof(rt->playpath) - 5);
 
     rt->client_report_size = 1048576;
     rt->bytes_read = 0;
@@ -1013,6 +1044,23 @@ static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
     return size;
 }
 
+#define OFFSET(x) offsetof(RTMPContext, x)
+#define DEC AV_OPT_FLAG_DECODING_PARAM
+#define ENC AV_OPT_FLAG_ENCODING_PARAM
+
+static const AVOption rtmp_options[] = {
+    {"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
+    {"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
+    { NULL },
+};
+
+static const AVClass rtmp_class = {
+    .class_name = "rtmp",
+    .item_name  = av_default_item_name,
+    .option     = rtmp_options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
 URLProtocol ff_rtmp_protocol = {
     .name           = "rtmp",
     .url_open       = rtmp_open,
@@ -1021,4 +1069,5 @@ URLProtocol ff_rtmp_protocol = {
     .url_close      = rtmp_close,
     .priv_data_size = sizeof(RTMPContext),
     .flags          = URL_PROTOCOL_FLAG_NETWORK,
+    .priv_data_class= &rtmp_class,
 };
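A hedged usage sketch for the two new options (the URL, application name, and playpath values below are made up): because they are declared as private options of the rtmp protocol, they can be passed through an options dictionary when the stream is opened.

#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavutil/dict.h"

int open_rtmp_input(AVIOContext **pb)
{
    AVDictionary *opts = NULL;
    int ret;

    av_register_all();          /* registers the rtmp protocol handler */
    avformat_network_init();

    /* Override the app and the stream identifier parsed from the URL. */
    av_dict_set(&opts, "rtmp_app",      "liveapp",    0);
    av_dict_set(&opts, "rtmp_playpath", "mp4:stream", 0);

    ret = avio_open2(pb, "rtmp://example.com/ignored/path",
                     AVIO_FLAG_READ, NULL, &opts);
    av_dict_free(&opts);        /* options not consumed remain in the dict */
    return ret;
}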
@@ -22,7 +22,26 @@
 
 #include "avutil.h"
 
 /**
- * all in native-endian format
+ * Audio Sample Formats
+ *
+ * @par
+ * The data described by the sample format is always in native-endian order.
+ * Sample values can be expressed by native C types, hence the lack of a signed
+ * 24-bit sample format even though it is a common raw audio data format.
+ *
+ * @par
+ * The floating-point formats are based on full volume being in the range
+ * [-1.0, 1.0]. Any values outside this range are beyond full volume level.
+ *
+ * @par
+ * The data layout as used in av_samples_fill_arrays() and elsewhere in Libav
+ * (such as AVFrame in libavcodec) is as follows:
+ *
+ * For planar sample formats, each audio channel is in a separate data plane,
+ * and linesize is the buffer size, in bytes, for a single plane. All data
+ * planes must be the same size. For packed sample formats, only the first data
+ * plane is used, and samples for each channel are interleaved. In this case,
+ * linesize is the buffer size, in bytes, for the 1 plane.
  */
 enum AVSampleFormat {
     AV_SAMPLE_FMT_NONE = -1,
@@ -147,6 +166,9 @@ int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
 * buffer for planar layout, or the aligned size of the buffer for all channels
 * for packed layout.
 *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
 * @param[out] audio_data  array to be filled with the pointer for each channel
 * @param[out] linesize    calculated linesize, may be NULL
 * @param buf              the pointer to a buffer containing the samples
@@ -165,6 +187,9 @@ int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, uint8_t *buf,
 * linesize accordingly.
 * The allocated samples buffer can be freed by using av_freep(&audio_data[0])
 *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
 * @param[out] audio_data  array to be filled with the pointer for each channel
 * @param[out] linesize    aligned size for audio buffer(s), may be NULL
 * @param nb_channels      number of audio channels
......
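A hedged sketch illustrating the layout rules documented above (channel count and sample count are arbitrary, error checking omitted for brevity): allocate one packed and one planar S16 buffer with av_samples_alloc() and note where each channel's samples live.

#include <stdio.h>
#include "libavutil/mem.h"
#include "libavutil/samplefmt.h"

int main(void)
{
    uint8_t *packed[1] = { NULL };  /* packed: only data[0] is used         */
    uint8_t *planar[2] = { NULL };  /* planar: one data pointer per channel */
    int packed_linesize, planar_linesize;

    /* Packed S16, 2 channels, 1024 samples: data[0] holds L R L R ...,
     * linesize is the size of that single interleaved buffer. */
    av_samples_alloc(packed, &packed_linesize, 2, 1024, AV_SAMPLE_FMT_S16, 0);

    /* Planar S16, 2 channels, 1024 samples: data[0] holds all left samples,
     * data[1] all right samples, linesize is the size of one plane. */
    av_samples_alloc(planar, &planar_linesize, 2, 1024, AV_SAMPLE_FMT_S16P, 0);

    printf("packed linesize: %d, planar linesize: %d\n",
           packed_linesize, planar_linesize);

    av_freep(&packed[0]);
    av_freep(&planar[0]);   /* freeing data[0] releases the whole buffer */
    return 0;
}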