Commit 12327237 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  aacenc: Fix issues with huge values of bit_rate.
  dv_tablegen: Drop unnecessary av_unused attribute from dv_vlc_map_tableinit().
  proresenc: multithreaded quantiser search
  riff: use bps instead of bits_per_coded_sample in the WAVEFORMATEXTENSIBLE header
  avconv: only set the "channels" option when it exists for the specified input format
  avplay: update get_buffer to be inline with avconv
  aacdec: More robust output configuration.
  faac: Fix multi-channel ordering
  faac: Add .channel_layouts
  rtmp: Support 'rtmp_playpath', an option which overrides the stream identifier
  rtmp: Support 'rtmp_app', an option which overrides the name of application
  avutil: add better documentation for AVSampleFormat

Conflicts:
	libavcodec/aac.h
	libavcodec/aacdec.c
	libavcodec/aacenc.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents a6667526 0f96f0d9
@@ -4189,9 +4189,17 @@ static int opt_input_file(OptionsContext *o, const char *opt, const char *filena
         av_dict_set(&format_opts, "sample_rate", buf, 0);
     }
     if (o->nb_audio_channels) {
-        snprintf(buf, sizeof(buf), "%d", o->audio_channels[o->nb_audio_channels - 1].u.i);
-        av_dict_set(&format_opts, "channels", buf, 0);
+        /* because we set audio_channels based on both the "ac" and
+         * "channel_layout" options, we need to check that the specified
+         * demuxer actually has the "channels" option before setting it */
+        if (file_iformat && file_iformat->priv_class &&
+            av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
+                        AV_OPT_SEARCH_FAKE_OBJ)) {
+            snprintf(buf, sizeof(buf), "%d",
+                     o->audio_channels[o->nb_audio_channels - 1].u.i);
+            av_dict_set(&format_opts, "channels", buf, 0);
+        }
     }
     if (o->nb_frame_rates) {
        av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
    }
...
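For readers unfamiliar with the option-probing call used above: av_opt_find() with AV_OPT_SEARCH_FAKE_OBJ queries an AVClass for a named private option without instantiating the demuxer, so the caller can tell whether "channels" is accepted before putting it into the options dictionary. A minimal, self-contained sketch of that pattern (the helper name and buffer size are illustrative, not part of the patch):

    #include <stdio.h>
    #include "libavformat/avformat.h"
    #include "libavutil/dict.h"
    #include "libavutil/opt.h"

    /* Only pass "channels" to demuxers that actually declare such an option. */
    static void set_channels_if_supported(AVDictionary **opts,
                                          AVInputFormat *fmt, int channels)
    {
        char buf[16];
        if (fmt && fmt->priv_class &&
            av_opt_find(&fmt->priv_class, "channels", NULL, 0,
                        AV_OPT_SEARCH_FAKE_OBJ)) {
            snprintf(buf, sizeof(buf), "%d", channels);
            av_dict_set(opts, "channels", buf, 0);
        }
    }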
@@ -1600,6 +1600,10 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
     pic->opaque = ref;
     pic->type = FF_BUFFER_TYPE_USER;
     pic->reordered_opaque = codec->reordered_opaque;
+    pic->width = codec->width;
+    pic->height = codec->height;
+    pic->format = codec->pix_fmt;
+    pic->sample_aspect_ratio = codec->sample_aspect_ratio;
     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
     else            pic->pkt_pts = AV_NOPTS_VALUE;
     return 0;
...
@@ -112,6 +112,15 @@ enum OCStatus {
     OC_LOCKED,          ///< Output configuration locked in place
 };
 
+typedef struct {
+    MPEG4AudioConfig m4ac;
+    uint8_t layout_map[MAX_ELEM_ID*4][3];
+    int layout_map_tags;
+    int channels;
+    uint64_t channel_layout;
+    enum OCStatus status;
+} OutputConfiguration;
+
 /**
  * Predictor State
  */
@@ -254,8 +263,6 @@ typedef struct {
     AVCodecContext *avctx;
     AVFrame frame;
 
-    MPEG4AudioConfig m4ac;
-
     int is_saved;                 ///< Set if elements have stored overlap from previous frame.
     DynamicRangeControl che_drc;
@@ -263,8 +270,6 @@ typedef struct {
      * @name Channel element related data
      * @{
      */
-    uint8_t layout_map[MAX_ELEM_ID*4][3];
-    int layout_map_tags;
     ChannelElement *che[4][MAX_ELEM_ID];
     ChannelElement *tag_che_map[4][MAX_ELEM_ID];
     int tags_mapped;
@@ -299,7 +304,7 @@ typedef struct {
     DECLARE_ALIGNED(32, float, temp)[128];
 
-    enum OCStatus output_configured;
+    OutputConfiguration oc[2];
     int warned_num_aac_frames;
 } AACContext;
...
@@ -149,13 +149,13 @@ static av_cold int che_configure(AACContext *ac,
             ff_aac_sbr_ctx_init(ac, &ac->che[type][id]->sbr);
     }
     if (type != TYPE_CCE) {
-        if (*channels >= MAX_CHANNELS - (type == TYPE_CPE || (type == TYPE_SCE && ac->m4ac.ps == 1))) {
+        if (*channels >= MAX_CHANNELS - (type == TYPE_CPE || (type == TYPE_SCE && ac->oc[1].m4ac.ps == 1))) {
            av_log(ac->avctx, AV_LOG_ERROR, "Too many channels\n");
            return AVERROR_INVALIDDATA;
        }
        ac->output_data[(*channels)++] = ac->che[type][id]->ch[0].ret;
        if (type == TYPE_CPE ||
-            (type == TYPE_SCE && ac->m4ac.ps == 1)) {
+            (type == TYPE_SCE && ac->oc[1].m4ac.ps == 1)) {
            ac->output_data[(*channels)++] = ac->che[type][id]->ch[1].ret;
        }
    }
@@ -354,12 +354,39 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
     return layout;
 }
 
+/**
+ * Save current output configuration if and only if it has been locked.
+ */
+static void push_output_configuration(AACContext *ac) {
+    if (ac->oc[1].status == OC_LOCKED) {
+        ac->oc[0] = ac->oc[1];
+    }
+    ac->oc[1].status = OC_NONE;
+}
+
+/**
+ * Restore the previous output configuration if and only if the current
+ * configuration is unlocked.
+ */
+static void pop_output_configuration(AACContext *ac) {
+    if (ac->oc[1].status != OC_LOCKED) {
+        if (ac->oc[0].status == OC_LOCKED) {
+            ac->oc[1] = ac->oc[0];
+            ac->avctx->channels = ac->oc[1].channels;
+            ac->avctx->channel_layout = ac->oc[1].channel_layout;
+        }else{
+            ac->avctx->channels = 0;
+            ac->avctx->channel_layout = 0;
+        }
+    }
+}
+
 /**
  * Configure output channel order based on the current program configuration element.
  *
  * @return Returns error status. 0 - OK, !0 - error
  */
-static av_cold int output_configure(AACContext *ac,
+static int output_configure(AACContext *ac,
                             uint8_t layout_map[MAX_ELEM_ID*4][3], int tags,
                             int channel_config, enum OCStatus oc_type)
 {
@@ -367,9 +394,9 @@ static av_cold int output_configure(AACContext *ac,
     int i, channels = 0, ret;
     uint64_t layout = 0;
 
-    if (ac->layout_map != layout_map) {
-        memcpy(ac->layout_map, layout_map, tags * sizeof(layout_map[0]));
-        ac->layout_map_tags = tags;
+    if (ac->oc[1].layout_map != layout_map) {
+        memcpy(ac->oc[1].layout_map, layout_map, tags * sizeof(layout_map[0]));
+        ac->oc[1].layout_map_tags = tags;
     }
 
     // Try to sniff a reasonable channel order, otherwise output the
@@ -389,8 +416,9 @@ static av_cold int output_configure(AACContext *ac,
     memcpy(ac->tag_che_map, ac->che, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
     if (layout) avctx->channel_layout = layout;
 
-    avctx->channels = channels;
-    ac->output_configured = oc_type;
+    ac->oc[1].channel_layout = layout;
+    avctx->channels = ac->oc[1].channels = channels;
+    ac->oc[1].status = oc_type;
 
     return 0;
 }
@@ -418,7 +446,7 @@ static void flush(AVCodecContext *avctx)
  *
  * @return Returns error status. 0 - OK, !0 - error
  */
-static av_cold int set_default_channel_config(AVCodecContext *avctx,
+static int set_default_channel_config(AVCodecContext *avctx,
                                       uint8_t (*layout_map)[3],
                                       int *tags,
                                       int channel_config)
@@ -436,13 +464,14 @@ static av_cold int set_default_channel_config(AVCodecContext *avctx,
 static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
 {
     // For PCE based channel configurations map the channels solely based on tags.
-    if (!ac->m4ac.chan_config) {
+    if (!ac->oc[1].m4ac.chan_config) {
         return ac->tag_che_map[type][elem_id];
     }
     // Allow single CPE stereo files to be signalled with mono configuration.
-    if (!ac->tags_mapped && type == TYPE_CPE && ac->m4ac.chan_config == 1) {
+    if (!ac->tags_mapped && type == TYPE_CPE && ac->oc[1].m4ac.chan_config == 1) {
         uint8_t layout_map[MAX_ELEM_ID*4][3];
         int layout_map_tags;
+        push_output_configuration(ac);
         if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
                                        2) < 0)
@@ -451,10 +480,25 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
                                        2, OC_TRIAL_FRAME) < 0)
             return NULL;
-        ac->m4ac.chan_config = 2;
+        ac->oc[1].m4ac.chan_config = 2;
+    }
+    // And vice-versa
+    if (!ac->tags_mapped && type == TYPE_SCE && ac->oc[1].m4ac.chan_config == 2) {
+        uint8_t layout_map[MAX_ELEM_ID*4][3];
+        int layout_map_tags;
+        push_output_configuration(ac);
+        if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
+                                       1) < 0)
+            return NULL;
+        if (output_configure(ac, layout_map, layout_map_tags,
+                             1, OC_TRIAL_FRAME) < 0)
+            return NULL;
+        ac->oc[1].m4ac.chan_config = 1;
     }
     // For indexed channel configurations map the channels solely based on position.
-    switch (ac->m4ac.chan_config) {
+    switch (ac->oc[1].m4ac.chan_config) {
     case 7:
         if (ac->tags_mapped == 3 && type == TYPE_CPE) {
             ac->tags_mapped++;
@@ -464,7 +508,7 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
         /* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
            instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
           encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
-        if (ac->tags_mapped == tags_per_config[ac->m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
+        if (ac->tags_mapped == tags_per_config[ac->oc[1].m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
             ac->tags_mapped++;
             return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
         }
@@ -474,16 +518,16 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
             return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
         }
     case 4:
-        if (ac->tags_mapped == 2 && ac->m4ac.chan_config == 4 && type == TYPE_SCE) {
+        if (ac->tags_mapped == 2 && ac->oc[1].m4ac.chan_config == 4 && type == TYPE_SCE) {
             ac->tags_mapped++;
             return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
         }
     case 3:
     case 2:
-        if (ac->tags_mapped == (ac->m4ac.chan_config != 2) && type == TYPE_CPE) {
+        if (ac->tags_mapped == (ac->oc[1].m4ac.chan_config != 2) && type == TYPE_CPE) {
             ac->tags_mapped++;
             return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
-        } else if (ac->m4ac.chan_config == 2) {
+        } else if (ac->oc[1].m4ac.chan_config == 2) {
             return NULL;
         }
     case 1:
@@ -783,10 +827,10 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
     float output_scale_factor;
 
     ac->avctx = avctx;
-    ac->m4ac.sample_rate = avctx->sample_rate;
+    ac->oc[1].m4ac.sample_rate = avctx->sample_rate;
 
     if (avctx->extradata_size > 0) {
-        if (decode_audio_specific_config(ac, ac->avctx, &ac->m4ac,
+        if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
                                          avctx->extradata,
                                          avctx->extradata_size*8, 1) < 0)
             return -1;
@@ -796,10 +840,10 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
         int layout_map_tags;
 
         sr = sample_rate_idx(avctx->sample_rate);
-        ac->m4ac.sampling_index = sr;
-        ac->m4ac.channels = avctx->channels;
-        ac->m4ac.sbr = -1;
-        ac->m4ac.ps = -1;
+        ac->oc[1].m4ac.sampling_index = sr;
+        ac->oc[1].m4ac.channels = avctx->channels;
+        ac->oc[1].m4ac.sbr = -1;
+        ac->oc[1].m4ac.ps = -1;
 
         for (i = 0; i < FF_ARRAY_ELEMS(ff_mpeg4audio_channels); i++)
             if (ff_mpeg4audio_channels[i] == avctx->channels)
@@ -807,14 +851,14 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
         if (i == FF_ARRAY_ELEMS(ff_mpeg4audio_channels)) {
             i = 0;
         }
-        ac->m4ac.chan_config = i;
+        ac->oc[1].m4ac.chan_config = i;
 
-        if (ac->m4ac.chan_config) {
+        if (ac->oc[1].m4ac.chan_config) {
             int ret = set_default_channel_config(avctx, layout_map,
-                &layout_map_tags, ac->m4ac.chan_config);
+                &layout_map_tags, ac->oc[1].m4ac.chan_config);
             if (!ret)
                 output_configure(ac, layout_map, layout_map_tags,
-                                 ac->m4ac.chan_config, OC_GLOBAL_HDR);
+                                 ac->oc[1].m4ac.chan_config, OC_GLOBAL_HDR);
             else if (avctx->err_recognition & AV_EF_EXPLODE)
                 return AVERROR_INVALIDDATA;
         }
@@ -902,7 +946,7 @@ static int decode_prediction(AACContext *ac, IndividualChannelStream *ics,
                 return -1;
             }
         }
-    for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->m4ac.sampling_index]); sfb++) {
+    for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]); sfb++) {
         ics->prediction_used[sfb] = get_bits1(gb);
     }
     return 0;
@@ -950,24 +994,24 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
             }
         }
         ics->num_windows = 8;
-        ics->swb_offset = ff_swb_offset_128[ac->m4ac.sampling_index];
-        ics->num_swb = ff_aac_num_swb_128[ac->m4ac.sampling_index];
-        ics->tns_max_bands = ff_tns_max_bands_128[ac->m4ac.sampling_index];
+        ics->swb_offset = ff_swb_offset_128[ac->oc[1].m4ac.sampling_index];
+        ics->num_swb = ff_aac_num_swb_128[ac->oc[1].m4ac.sampling_index];
+        ics->tns_max_bands = ff_tns_max_bands_128[ac->oc[1].m4ac.sampling_index];
         ics->predictor_present = 0;
     } else {
         ics->max_sfb = get_bits(gb, 6);
         ics->num_windows = 1;
-        ics->swb_offset = ff_swb_offset_1024[ac->m4ac.sampling_index];
-        ics->num_swb = ff_aac_num_swb_1024[ac->m4ac.sampling_index];
-        ics->tns_max_bands = ff_tns_max_bands_1024[ac->m4ac.sampling_index];
+        ics->swb_offset = ff_swb_offset_1024[ac->oc[1].m4ac.sampling_index];
+        ics->num_swb = ff_aac_num_swb_1024[ac->oc[1].m4ac.sampling_index];
+        ics->tns_max_bands = ff_tns_max_bands_1024[ac->oc[1].m4ac.sampling_index];
         ics->predictor_present = get_bits1(gb);
         ics->predictor_reset_group = 0;
         if (ics->predictor_present) {
-            if (ac->m4ac.object_type == AOT_AAC_MAIN) {
+            if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN) {
                 if (decode_prediction(ac, ics, gb)) {
                     goto fail;
                 }
-            } else if (ac->m4ac.object_type == AOT_AAC_LC) {
+            } else if (ac->oc[1].m4ac.object_type == AOT_AAC_LC) {
                 av_log(ac->avctx, AV_LOG_ERROR, "Prediction is not allowed in AAC-LC.\n");
                 goto fail;
             } else {
@@ -1141,7 +1185,7 @@ static int decode_tns(AACContext *ac, TemporalNoiseShaping *tns,
 {
     int w, filt, i, coef_len, coef_res, coef_compress;
     const int is8 = ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE;
-    const int tns_max_order = is8 ? 7 : ac->m4ac.object_type == AOT_AAC_MAIN ? 20 : 12;
+    const int tns_max_order = is8 ? 7 : ac->oc[1].m4ac.object_type == AOT_AAC_MAIN ? 20 : 12;
     for (w = 0; w < ics->num_windows; w++) {
         if ((tns->n_filt[w] = get_bits(gb, 2 - is8))) {
             coef_res = get_bits1(gb);
@@ -1552,7 +1596,7 @@ static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
     }
 
     if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
-        for (sfb = 0; sfb < ff_aac_pred_sfb_max[ac->m4ac.sampling_index]; sfb++) {
+        for (sfb = 0; sfb < ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]; sfb++) {
             for (k = sce->ics.swb_offset[sfb]; k < sce->ics.swb_offset[sfb + 1]; k++) {
                 predict(&sce->predictor_state[k], &sce->coeffs[k],
                         sce->ics.predictor_present && sce->ics.prediction_used[sfb]);
@@ -1621,7 +1665,7 @@ static int decode_ics(AACContext *ac, SingleChannelElement *sce,
     if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present, &pulse, ics, sce->band_type) < 0)
         return -1;
 
-    if (ac->m4ac.object_type == AOT_AAC_MAIN && !common_window)
+    if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN && !common_window)
         apply_prediction(ac, sce);
 
     return 0;
@@ -1711,7 +1755,7 @@ static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
         i = cpe->ch[1].ics.use_kb_window[0];
         cpe->ch[1].ics = cpe->ch[0].ics;
         cpe->ch[1].ics.use_kb_window[1] = i;
-        if (cpe->ch[1].ics.predictor_present && (ac->m4ac.object_type != AOT_AAC_MAIN))
+        if (cpe->ch[1].ics.predictor_present && (ac->oc[1].m4ac.object_type != AOT_AAC_MAIN))
             if ((cpe->ch[1].ics.ltp.present = get_bits(gb, 1)))
                 decode_ltp(ac, &cpe->ch[1].ics.ltp, gb, cpe->ch[1].ics.max_sfb);
         ms_present = get_bits(gb, 2);
@@ -1729,7 +1773,7 @@ static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
     if (common_window) {
         if (ms_present)
             apply_mid_side_stereo(ac, cpe);
-        if (ac->m4ac.object_type == AOT_AAC_MAIN) {
+        if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN) {
             apply_prediction(ac, &cpe->ch[0]);
             apply_prediction(ac, &cpe->ch[1]);
         }
@@ -1910,21 +1954,21 @@ static int decode_extension_payload(AACContext *ac, GetBitContext *gb, int cnt,
         if (!che) {
             av_log(ac->avctx, AV_LOG_ERROR, "SBR was found before the first channel element.\n");
             return res;
-        } else if (!ac->m4ac.sbr) {
+        } else if (!ac->oc[1].m4ac.sbr) {
             av_log(ac->avctx, AV_LOG_ERROR, "SBR signaled to be not-present but was found in the bitstream.\n");
             skip_bits_long(gb, 8 * cnt - 4);
             return res;
-        } else if (ac->m4ac.sbr == -1 && ac->output_configured == OC_LOCKED) {
+        } else if (ac->oc[1].m4ac.sbr == -1 && ac->oc[1].status == OC_LOCKED) {
             av_log(ac->avctx, AV_LOG_ERROR, "Implicit SBR was found with a first occurrence after the first frame.\n");
             skip_bits_long(gb, 8 * cnt - 4);
             return res;
-        } else if (ac->m4ac.ps == -1 && ac->output_configured < OC_LOCKED && ac->avctx->channels == 1) {
-            ac->m4ac.sbr = 1;
-            ac->m4ac.ps = 1;
-            output_configure(ac, ac->layout_map, ac->layout_map_tags,
-                             ac->m4ac.chan_config, ac->output_configured);
+        } else if (ac->oc[1].m4ac.ps == -1 && ac->oc[1].status < OC_LOCKED && ac->avctx->channels == 1) {
+            ac->oc[1].m4ac.sbr = 1;
+            ac->oc[1].m4ac.ps = 1;
+            output_configure(ac, ac->oc[1].layout_map, ac->oc[1].layout_map_tags,
+                             ac->oc[1].m4ac.chan_config, ac->oc[1].status);
         } else {
-            ac->m4ac.sbr = 1;
+            ac->oc[1].m4ac.sbr = 1;
         }
         res = ff_decode_sbr_extension(ac, &che->sbr, gb, crc_flag, cnt, elem_type);
         break;
@@ -2170,7 +2214,7 @@ static void apply_dependent_coupling(AACContext *ac,
     float *dest = target->coeffs;
     const float *src = cce->ch[0].coeffs;
     int g, i, group, k, idx = 0;
-    if (ac->m4ac.object_type == AOT_AAC_LTP) {
+    if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {
         av_log(ac->avctx, AV_LOG_ERROR,
                "Dependent coupling is not supported together with LTP\n");
         return;
@@ -2205,7 +2249,7 @@ static void apply_independent_coupling(AACContext *ac,
     const float gain = cce->coup.gain[index][0];
     const float *src = cce->ch[0].ret;
     float *dest = target->ret;
-    const int len = 1024 << (ac->m4ac.sbr == 1);
+    const int len = 1024 << (ac->oc[1].m4ac.sbr == 1);
 
     for (i = 0; i < len; i++)
         dest[i] += gain * src[i];
@@ -2258,7 +2302,7 @@ static void spectral_to_sample(AACContext *ac)
             if (che) {
                 if (type <= TYPE_CPE)
                     apply_channel_coupling(ac, che, type, i, BEFORE_TNS, apply_dependent_coupling);
-                if (ac->m4ac.object_type == AOT_AAC_LTP) {
+                if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {
                     if (che->ch[0].ics.predictor_present) {
                         if (che->ch[0].ics.ltp.present)
                             apply_ltp(ac, &che->ch[0]);
@@ -2274,14 +2318,14 @@ static void spectral_to_sample(AACContext *ac)
                     apply_channel_coupling(ac, che, type, i, BETWEEN_TNS_AND_IMDCT, apply_dependent_coupling);
                 if (type != TYPE_CCE || che->coup.coupling_point == AFTER_IMDCT) {
                     imdct_and_windowing(ac, &che->ch[0]);
-                    if (ac->m4ac.object_type == AOT_AAC_LTP)
+                    if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
                         update_ltp(ac, &che->ch[0]);
                     if (type == TYPE_CPE) {
                         imdct_and_windowing(ac, &che->ch[1]);
-                        if (ac->m4ac.object_type == AOT_AAC_LTP)
+                        if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
                             update_ltp(ac, &che->ch[1]);
                     }
-                    if (ac->m4ac.sbr > 0) {
+                    if (ac->oc[1].m4ac.sbr > 0) {
                         ff_sbr_apply(ac, &che->sbr, type, che->ch[0].ret, che->ch[1].ret);
                     }
                 }
@@ -2301,33 +2345,33 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
     size = avpriv_aac_parse_header(gb, &hdr_info);
 
     if (size > 0) {
+        if (!ac->warned_num_aac_frames && hdr_info.num_aac_frames != 1) {
+            // This is 2 for "VLB " audio in NSV files.
+            // See samples/nsv/vlb_audio.
+            av_log_missing_feature(ac->avctx, "More than one AAC RDB per ADTS frame is", 0);
+            ac->warned_num_aac_frames = 1;
+        }
+        push_output_configuration(ac);
         if (hdr_info.chan_config) {
-            ac->m4ac.chan_config = hdr_info.chan_config;
+            ac->oc[1].m4ac.chan_config = hdr_info.chan_config;
             if (set_default_channel_config(ac->avctx, layout_map,
                                            &layout_map_tags, hdr_info.chan_config))
                 return -7;
            if (output_configure(ac, layout_map, layout_map_tags,
                                 hdr_info.chan_config,
-                                FFMAX(ac->output_configured, OC_TRIAL_FRAME)))
+                                FFMAX(ac->oc[1].status, OC_TRIAL_FRAME)))
                 return -7;
-        } else if (ac->output_configured != OC_LOCKED) {
-            ac->m4ac.chan_config = 0;
-            ac->output_configured = OC_NONE;
-        }
-        if (ac->output_configured != OC_LOCKED) {
-            ac->m4ac.sbr = -1;
-            ac->m4ac.ps = -1;
-            ac->m4ac.sample_rate = hdr_info.sample_rate;
-            ac->m4ac.sampling_index = hdr_info.sampling_index;
-            ac->m4ac.object_type = hdr_info.object_type;
-        }
-        if (!ac->avctx->sample_rate)
-            ac->avctx->sample_rate = hdr_info.sample_rate;
-        if (!ac->warned_num_aac_frames && hdr_info.num_aac_frames != 1) {
-            // This is 2 for "VLB " audio in NSV files.
-            // See samples/nsv/vlb_audio.
-            av_log_missing_feature(ac->avctx, "More than one AAC RDB per ADTS frame is", 0);
-            ac->warned_num_aac_frames = 1;
+        } else {
+            ac->oc[1].m4ac.chan_config = 0;
+        }
+        ac->oc[1].m4ac.sample_rate = hdr_info.sample_rate;
+        ac->oc[1].m4ac.sampling_index = hdr_info.sampling_index;
+        ac->oc[1].m4ac.object_type = hdr_info.object_type;
+        if (ac->oc[0].status != OC_LOCKED ||
+            ac->oc[0].m4ac.chan_config != hdr_info.chan_config ||
+            ac->oc[0].m4ac.sample_rate != hdr_info.sample_rate) {
+            ac->oc[1].m4ac.sbr = -1;
+            ac->oc[1].m4ac.ps = -1;
         }
         if (!hdr_info.crc_absent)
             skip_bits(gb, 16);
@@ -2342,16 +2386,18 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
     ChannelElement *che = NULL, *che_prev = NULL;
     enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
     int err, elem_id;
-    int samples = 0, multiplier, audio_found = 0;
+    int samples = 0, multiplier, audio_found = 0, pce_found = 0;
 
     if (show_bits(gb, 12) == 0xfff) {
         if (parse_adts_frame_header(ac, gb) < 0) {
             av_log(avctx, AV_LOG_ERROR, "Error decoding AAC frame header.\n");
-            return -1;
+            err = -1;
+            goto fail;
         }
-        if (ac->m4ac.sampling_index > 12) {
-            av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->m4ac.sampling_index);
-            return -1;
+        if (ac->oc[1].m4ac.sampling_index > 12) {
+            av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->oc[1].m4ac.sampling_index);
+            err = -1;
+            goto fail;
         }
     }
@@ -2364,7 +2410,8 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
             if (!(che=get_che(ac, elem_type, elem_id))) {
                 av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n",
                        elem_type, elem_id);
-                return -1;
+                err = -1;
+                goto fail;
             }
             samples = 1024;
         }
@@ -2397,17 +2444,20 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
         case TYPE_PCE: {
             uint8_t layout_map[MAX_ELEM_ID*4][3];
             int tags;
-            tags = decode_pce(avctx, &ac->m4ac, layout_map, gb);
+            push_output_configuration(ac);
+            tags = decode_pce(avctx, &ac->oc[1].m4ac, layout_map, gb);
             if (tags < 0) {
                 err = tags;
                 break;
             }
-            if (ac->output_configured > OC_TRIAL_PCE)
+            if (pce_found) {
                 av_log(avctx, AV_LOG_INFO,
                        "Evaluating a further program_config_element.\n");
+            }
             err = output_configure(ac, layout_map, tags, 0, OC_TRIAL_PCE);
             if (!err)
-                ac->m4ac.chan_config = 0;
+                ac->oc[1].m4ac.chan_config = 0;
+            pce_found = 1;
             break;
         }
@@ -2416,7 +2466,8 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
             elem_id += get_bits(gb, 8) - 1;
             if (get_bits_left(gb) < 8 * elem_id) {
                     av_log(avctx, AV_LOG_ERROR, overread_err);
-                    return -1;
+                    err = -1;
+                    goto fail;
             }
             while (elem_id > 0)
                 elem_id -= decode_extension_payload(ac, gb, elem_id, che_prev, elem_type_prev);
@@ -2432,29 +2483,27 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
         elem_type_prev = elem_type;
 
         if (err)
-            return err;
+            goto fail;
 
         if (get_bits_left(gb) < 3) {
             av_log(avctx, AV_LOG_ERROR, overread_err);
-            return -1;
+            err = -1;
+            goto fail;
         }
     }
 
     spectral_to_sample(ac);
 
-    multiplier = (ac->m4ac.sbr == 1) ? ac->m4ac.ext_sample_rate > ac->m4ac.sample_rate : 0;
+    multiplier = (ac->oc[1].m4ac.sbr == 1) ? ac->oc[1].m4ac.ext_sample_rate > ac->oc[1].m4ac.sample_rate : 0;
     samples <<= multiplier;
-    if (ac->output_configured < OC_LOCKED) {
-        avctx->sample_rate = ac->m4ac.sample_rate << multiplier;
-        avctx->frame_size = samples;
-    }
 
     if (samples) {
         /* get output buffer */
         ac->frame.nb_samples = samples;
         if ((err = avctx->get_buffer(avctx, &ac->frame)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-            return err;
+            err = -1;
+            goto fail;
         }
 
         if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT)
@@ -2470,10 +2519,16 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
     }
 
     *got_frame_ptr = !!samples;
-    if (ac->output_configured && audio_found)
-        ac->output_configured = OC_LOCKED;
+    if (ac->oc[1].status && audio_found) {
+        avctx->sample_rate = ac->oc[1].m4ac.sample_rate << multiplier;
+        avctx->frame_size = samples;
+        ac->oc[1].status = OC_LOCKED;
+    }
 
     return 0;
+fail:
+    pop_output_configuration(ac);
+    return err;
 }
 
 static int aac_decode_frame(AVCodecContext *avctx, void *data,
@@ -2499,11 +2554,14 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
             return AVERROR(ENOMEM);
         avctx->extradata_size = new_extradata_size;
         memcpy(avctx->extradata, new_extradata, new_extradata_size);
-        if (decode_audio_specific_config(ac, ac->avctx, &ac->m4ac,
+        push_output_configuration(ac);
+        if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
                                          avctx->extradata,
-                                         avctx->extradata_size*8, 1) < 0)
+                                         avctx->extradata_size*8, 1) < 0) {
+            pop_output_configuration(ac);
             return AVERROR_INVALIDDATA;
         }
+    }
 
     init_get_bits(&gb, buf, buf_size * 8);
@@ -2562,7 +2620,7 @@ static int latm_decode_audio_specific_config(struct LATMContext *latmctx,
 {
     AACContext *ac = &latmctx->aac_ctx;
     AVCodecContext *avctx = ac->avctx;
-    MPEG4AudioConfig m4ac = {0};
+    MPEG4AudioConfig m4ac = { 0 };
     int config_start_bit = get_bits_count(gb);
     int sync_extension = 0;
     int bits_consumed, esize;
@@ -2587,8 +2645,8 @@ static int latm_decode_audio_specific_config(struct LATMContext *latmctx,
     if (bits_consumed < 0)
         return AVERROR_INVALIDDATA;
 
-    if (ac->m4ac.sample_rate != m4ac.sample_rate ||
-        ac->m4ac.chan_config != m4ac.chan_config) {
+    if (ac->oc[1].m4ac.sample_rate != m4ac.sample_rate ||
+        ac->oc[1].m4ac.chan_config != m4ac.chan_config) {
 
         av_log(avctx, AV_LOG_INFO, "audio config changed\n");
         latmctx->initialized = 0;
@@ -2769,10 +2827,13 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out,
             *got_frame_ptr = 0;
             return avpkt->size;
         } else {
+            push_output_configuration(&latmctx->aac_ctx);
             if ((err = decode_audio_specific_config(
-                    &latmctx->aac_ctx, avctx, &latmctx->aac_ctx.m4ac,
-                    avctx->extradata, avctx->extradata_size*8, 1)) < 0)
+                    &latmctx->aac_ctx, avctx, &latmctx->aac_ctx.oc[1].m4ac,
+                    avctx->extradata, avctx->extradata_size*8, 1)) < 0) {
+                pop_output_configuration(&latmctx->aac_ctx);
                 return err;
+            }
             latmctx->initialized = 1;
         }
     }
...
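Taken together, the aacdec changes above replace the single output_configured flag with a two-entry OutputConfiguration array: ac->oc[1] holds the configuration currently being tried for a frame, ac->oc[0] holds the last configuration that was successfully locked in. A rough sketch of the resulting control flow (decode_one_frame() below is purely illustrative, not a function in this patch):

    /* Trial/commit pattern used by the decoder above. */
    push_output_configuration(ac);            /* preserve oc[1] in oc[0] if it was locked      */
    if (decode_one_frame(ac, gb) < 0) {       /* hypothetical: parse and configure one frame   */
        pop_output_configuration(ac);         /* failure: restore the previously locked setup  */
    } else if (audio_found) {
        ac->oc[1].status = OC_LOCKED;         /* success: commit the trial configuration       */
    }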
@@ -571,8 +571,10 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
         }
         start_ch += chans;
     }
-    if ((ret = ff_alloc_packet2(avctx, avpkt, 768 * s->channels)))
+    if ((ret = ff_alloc_packet2(avctx, avpkt, 768 * s->channels))) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
         return ret;
+    }
 
     do {
         int frame_bits;
...
@@ -918,7 +918,7 @@ static void read_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
 {
     switch (bs_extension_id) {
     case EXTENSION_ID_PS:
-        if (!ac->m4ac.ps) {
+        if (!ac->oc[1].m4ac.ps) {
             av_log(ac->avctx, AV_LOG_ERROR, "Parametric Stereo signaled to be not-present but was found in the bitstream.\n");
             skip_bits_long(gb, *num_bits_left); // bs_fill_bits
             *num_bits_left = 0;
@@ -1077,9 +1077,9 @@ int ff_decode_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
         sbr->reset = 0;
 
     if (!sbr->sample_rate)
-        sbr->sample_rate = 2 * ac->m4ac.sample_rate; //TODO use the nominal sample rate for arbitrary sample rate support
-    if (!ac->m4ac.ext_sample_rate)
-        ac->m4ac.ext_sample_rate = 2 * ac->m4ac.sample_rate;
+        sbr->sample_rate = 2 * ac->oc[1].m4ac.sample_rate; //TODO use the nominal sample rate for arbitrary sample rate support
+    if (!ac->oc[1].m4ac.ext_sample_rate)
+        ac->oc[1].m4ac.ext_sample_rate = 2 * ac->oc[1].m4ac.sample_rate;
 
     if (crc) {
         skip_bits(gb, 10); // bs_sbr_crc_bits; TODO - implement CRC check
@@ -1654,7 +1654,7 @@ static void sbr_hf_assemble(float Y1[38][64][2],
 void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
                   float* L, float* R)
 {
-    int downsampled = ac->m4ac.ext_sample_rate < sbr->sample_rate;
+    int downsampled = ac->oc[1].m4ac.ext_sample_rate < sbr->sample_rate;
     int ch;
     int nch = (id_aac == TYPE_CPE) ? 2 : 1;
     int err;
@@ -1701,7 +1701,7 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
                               sbr->X_low, ch);
     }
 
-    if (ac->m4ac.ps == 1) {
+    if (ac->oc[1].m4ac.ps == 1) {
         if (sbr->ps.start) {
             ff_ps_apply(ac->avctx, &sbr->ps, sbr->X[0], sbr->X[1], sbr->kx[1] + sbr->m[1]);
         } else {
...
@@ -25,7 +25,6 @@
 #include <stdint.h>
 
-#include "libavutil/attributes.h"
 #include "dv_vlc_data.h"
 
 #if CONFIG_SMALL
@@ -48,7 +47,7 @@ typedef struct dv_vlc_pair {
 #else
 static struct dv_vlc_pair dv_vlc_map[DV_VLC_MAP_RUN_SIZE][DV_VLC_MAP_LEV_SIZE];
 
-static void av_unused dv_vlc_map_tableinit(void)
+static void dv_vlc_map_tableinit(void)
 {
     int i, j;
     for (i = 0; i < NB_DV_VLC - 1; i++) {
...
@@ -29,6 +29,7 @@
 #include "avcodec.h"
 #include "audio_frame_queue.h"
 #include "internal.h"
+#include "libavutil/audioconvert.h"
 
 /* libfaac has an encoder delay of 1024 samples */
@@ -39,13 +40,6 @@ typedef struct FaacAudioContext {
     AudioFrameQueue afq;
 } FaacAudioContext;
 
-static const int channel_maps[][6] = {
-    { 2, 0, 1 },          //< C L R
-    { 2, 0, 1, 3 },       //< C L R Cs
-    { 2, 0, 1, 3, 4 },    //< C L R Ls Rs
-    { 2, 0, 1, 4, 5, 3 }, //< C L R Ls Rs LFE
-};
-
 static av_cold int Faac_encode_close(AVCodecContext *avctx)
 {
     FaacAudioContext *s = avctx->priv_data;
@@ -62,6 +56,13 @@ static av_cold int Faac_encode_close(AVCodecContext *avctx)
     return 0;
 }
 
+static const int channel_maps[][6] = {
+    { 2, 0, 1 },          //< C L R
+    { 2, 0, 1, 3 },       //< C L R Cs
+    { 2, 0, 1, 3, 4 },    //< C L R Ls Rs
+    { 2, 0, 1, 4, 5, 3 }, //< C L R Ls Rs LFE
+};
+
 static av_cold int Faac_encode_init(AVCodecContext *avctx)
 {
     FaacAudioContext *s = avctx->priv_data;
@@ -184,8 +185,10 @@ static int Faac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
     int num_samples = frame ? frame->nb_samples : 0;
     void *samples = frame ? frame->data[0] : NULL;
 
-    if ((ret = ff_alloc_packet2(avctx, avpkt, (7 + 768) * avctx->channels)))
+    if ((ret = ff_alloc_packet2(avctx, avpkt, (7 + 768) * avctx->channels))) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
         return ret;
+    }
 
     bytes_written = faacEncEncode(s->faac_handle, samples,
                                   num_samples * avctx->channels,
@@ -221,6 +224,16 @@ static const AVProfile profiles[] = {
     { FF_PROFILE_UNKNOWN },
 };
 
+static const uint64_t faac_channel_layouts[] = {
+    AV_CH_LAYOUT_MONO,
+    AV_CH_LAYOUT_STEREO,
+    AV_CH_LAYOUT_SURROUND,
+    AV_CH_LAYOUT_4POINT0,
+    AV_CH_LAYOUT_5POINT0_BACK,
+    AV_CH_LAYOUT_5POINT1_BACK,
+    0
+};
+
 AVCodec ff_libfaac_encoder = {
     .name = "libfaac",
     .type = AVMEDIA_TYPE_AUDIO,
@@ -234,4 +247,5 @@ AVCodec ff_libfaac_encoder = {
                       AV_SAMPLE_FMT_NONE },
     .long_name = NULL_IF_CONFIG_SMALL("libfaac AAC (Advanced Audio Codec)"),
     .profiles = NULL_IF_CONFIG_SMALL(profiles),
+    .channel_layouts = faac_channel_layouts,
 };
@@ -169,6 +169,13 @@ struct TrellisNode {
 
 #define MAX_STORED_Q 16
 
+typedef struct ProresThreadData {
+    DECLARE_ALIGNED(16, DCTELEM, blocks)[MAX_PLANES][64 * 4 * MAX_MBS_PER_SLICE];
+    DECLARE_ALIGNED(16, uint16_t, emu_buf)[16 * 16];
+    int16_t custom_q[64];
+    struct TrellisNode *nodes;
+} ProresThreadData;
+
 typedef struct ProresContext {
     AVClass *class;
     DECLARE_ALIGNED(16, DCTELEM, blocks)[MAX_PLANES][64 * 4 * MAX_MBS_PER_SLICE];
@@ -197,13 +204,14 @@ typedef struct ProresContext {
     int profile;
     const struct prores_profile *profile_info;
 
-    struct TrellisNode *nodes;
     int *slice_q;
 
+    ProresThreadData *tdata;
 } ProresContext;
 
 static void get_slice_data(ProresContext *ctx, const uint16_t *src,
                            int linesize, int x, int y, int w, int h,
-                           DCTELEM *blocks,
+                           DCTELEM *blocks, uint16_t *emu_buf,
                            int mbs_per_slice, int blocks_per_mb, int is_chroma)
 {
     const uint16_t *esrc;
@@ -223,24 +231,24 @@ static void get_slice_data(ProresContext *ctx, const uint16_t *src,
     } else {
         int bw, bh, pix;
 
-        esrc = ctx->emu_buf;
-        elinesize = 16 * sizeof(*ctx->emu_buf);
+        esrc = emu_buf;
+        elinesize = 16 * sizeof(*emu_buf);
 
         bw = FFMIN(w - x, mb_width);
         bh = FFMIN(h - y, 16);
 
         for (j = 0; j < bh; j++) {
-            memcpy(ctx->emu_buf + j * 16,
+            memcpy(emu_buf + j * 16,
                    (const uint8_t*)src + j * linesize,
                    bw * sizeof(*src));
-            pix = ctx->emu_buf[j * 16 + bw - 1];
+            pix = emu_buf[j * 16 + bw - 1];
             for (k = bw; k < mb_width; k++)
-                ctx->emu_buf[j * 16 + k] = pix;
+                emu_buf[j * 16 + k] = pix;
         }
         for (; j < 16; j++)
-            memcpy(ctx->emu_buf + j * 16,
-                   ctx->emu_buf + (bh - 1) * 16,
-                   mb_width * sizeof(*ctx->emu_buf));
+            memcpy(emu_buf + j * 16,
+                   emu_buf + (bh - 1) * 16,
+                   mb_width * sizeof(*emu_buf));
     }
     if (!is_chroma) {
         ctx->dsp.fdct(esrc, elinesize, blocks);
@@ -429,7 +437,7 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic,
         src = (const uint16_t*)(pic->data[i] + yp * pic->linesize[i]) + xp;
 
         get_slice_data(ctx, src, pic->linesize[i], xp, yp,
-                       pwidth, avctx->height, ctx->blocks[0],
+                       pwidth, avctx->height, ctx->blocks[0], ctx->emu_buf,
                        mbs_per_slice, num_cblocks, is_chroma);
         sizes[i] = encode_slice_plane(ctx, pb, src, pic->linesize[i],
                                       mbs_per_slice, ctx->blocks[0],
@@ -533,22 +541,23 @@ static int estimate_slice_plane(ProresContext *ctx, int *error, int plane,
                                 const uint16_t *src, int linesize,
                                 int mbs_per_slice,
                                 int blocks_per_mb, int plane_size_factor,
-                                const int16_t *qmat)
+                                const int16_t *qmat, ProresThreadData *td)
 {
     int blocks_per_slice;
     int bits;
 
     blocks_per_slice = mbs_per_slice * blocks_per_mb;
 
-    bits  = estimate_dcs(error, ctx->blocks[plane], blocks_per_slice, qmat[0]);
-    bits += estimate_acs(error, ctx->blocks[plane], blocks_per_slice,
+    bits  = estimate_dcs(error, td->blocks[plane], blocks_per_slice, qmat[0]);
+    bits += estimate_acs(error, td->blocks[plane], blocks_per_slice,
                          plane_size_factor, ctx->scantable.permutated, qmat);
 
     return FFALIGN(bits, 8);
 }
 
 static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic,
-                            int trellis_node, int x, int y, int mbs_per_slice)
+                            int trellis_node, int x, int y, int mbs_per_slice,
+                            ProresThreadData *td)
 {
     ProresContext *ctx = avctx->priv_data;
     int i, q, pq, xp, yp;
@@ -585,13 +594,13 @@ static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic,
         src = (const uint16_t*)(pic->data[i] + yp * pic->linesize[i]) + xp;
 
         get_slice_data(ctx, src, pic->linesize[i], xp, yp,
-                       pwidth, avctx->height, ctx->blocks[i],
+                       pwidth, avctx->height, td->blocks[i], td->emu_buf,
                        mbs_per_slice, num_cblocks[i], is_chroma[i]);
     }
 
     for (q = min_quant; q < max_quant + 2; q++) {
-        ctx->nodes[trellis_node + q].prev_node = -1;
-        ctx->nodes[trellis_node + q].quant = q;
+        td->nodes[trellis_node + q].prev_node = -1;
+        td->nodes[trellis_node + q].quant = q;
     }
 
     // todo: maybe perform coarser quantising to fit into frame size when needed
@@ -603,7 +612,7 @@ static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic,
                                           src, pic->linesize[i],
                                           mbs_per_slice,
                                           num_cblocks[i], plane_factor[i],
-                                          ctx->quants[q]);
+                                          ctx->quants[q], td);
         }
         if (bits > 65000 * 8) {
             error = SCORE_LIMIT;
@@ -623,7 +632,7 @@ static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic,
             if (q < MAX_STORED_Q) {
                 qmat = ctx->quants[q];
             } else {
-                qmat = ctx->custom_q;
+                qmat = td->custom_q;
                 for (i = 0; i < 64; i++)
                     qmat[i] = ctx->quant_mat[i] * q;
             }
@@ -632,7 +641,7 @@ static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic,
                                               src, pic->linesize[i],
                                               mbs_per_slice,
                                               num_cblocks[i], plane_factor[i],
-                                              qmat);
+                                              qmat, td);
         }
         if (bits <= ctx->bits_per_mb * mbs_per_slice)
             break;
@@ -642,7 +651,7 @@ static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic,
             slice_score[max_quant + 1] = error;
             overquant = q;
         }
-        ctx->nodes[trellis_node + max_quant + 1].quant = overquant;
+        td->nodes[trellis_node + max_quant + 1].quant = overquant;
 
         bits_limit = mbs * ctx->bits_per_mb;
         for (pq = min_quant; pq < max_quant + 2; pq++) {
@@ -651,30 +660,30 @@ static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic,
             for (q = min_quant; q < max_quant + 2; q++) {
                 cur = trellis_node + q;
 
-                bits = ctx->nodes[prev].bits + slice_bits[q];
+                bits = td->nodes[prev].bits + slice_bits[q];
                 error = slice_score[q];
                 if (bits > bits_limit)
                     error = SCORE_LIMIT;
 
-                if (ctx->nodes[prev].score < SCORE_LIMIT && error < SCORE_LIMIT)
-                    new_score = ctx->nodes[prev].score + error;
+                if (td->nodes[prev].score < SCORE_LIMIT && error < SCORE_LIMIT)
+                    new_score = td->nodes[prev].score + error;
                 else
                     new_score = SCORE_LIMIT;
-                if (ctx->nodes[cur].prev_node == -1 ||
-                    ctx->nodes[cur].score >= new_score) {
-                    ctx->nodes[cur].bits = bits;
-                    ctx->nodes[cur].score = new_score;
-                    ctx->nodes[cur].prev_node = prev;
+                if (td->nodes[cur].prev_node == -1 ||
+                    td->nodes[cur].score >= new_score) {
+                    td->nodes[cur].bits = bits;
+                    td->nodes[cur].score = new_score;
+                    td->nodes[cur].prev_node = prev;
                 }
             }
         }
 
-    error = ctx->nodes[trellis_node + min_quant].score;
+    error = td->nodes[trellis_node + min_quant].score;
     pq = trellis_node + min_quant;
     for (q = min_quant + 1; q < max_quant + 2; q++) {
-        if (ctx->nodes[trellis_node + q].score <= error) {
-            error = ctx->nodes[trellis_node + q].score;
+        if (td->nodes[trellis_node + q].score <= error) {
+            error = td->nodes[trellis_node + q].score;
             pq = trellis_node + q;
         }
     }
@@ -682,6 +691,30 @@ static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic,
     return pq;
 }
 
+static int find_quant_thread(AVCodecContext *avctx, void *arg,
+                             int jobnr, int threadnr)
+{
+    ProresContext *ctx = avctx->priv_data;
+    ProresThreadData *td = ctx->tdata + threadnr;
+    int mbs_per_slice = ctx->mbs_per_slice;
+    int x, y = jobnr, mb, q = 0;
+
+    for (x = mb = 0; x < ctx->mb_width; x += mbs_per_slice, mb++) {
+        while (ctx->mb_width - x < mbs_per_slice)
+            mbs_per_slice >>= 1;
+        q = find_slice_quant(avctx, avctx->coded_frame,
+                             (mb + 1) * TRELLIS_WIDTH, x, y,
+                             mbs_per_slice, td);
+    }
+
+    for (x = ctx->slices_width - 1; x >= 0; x--) {
+        ctx->slice_q[x + y * ctx->slices_width] = td->nodes[q].quant;
+        q = td->nodes[q].prev_node;
+    }
+
+    return 0;
+}
+
 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                         const AVFrame *pic, int *got_packet)
 {
@@ -751,25 +784,18 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     buf += ctx->num_slices * 2;
 
     // slices
-    for (y = 0; y < ctx->mb_height; y++) {
-        mbs_per_slice = ctx->mbs_per_slice;
-        if (!ctx->force_quant) {
-            for (x = mb = 0; x < ctx->mb_width; x += mbs_per_slice, mb++) {
-                while (ctx->mb_width - x < mbs_per_slice)
-                    mbs_per_slice >>= 1;
-                q = find_slice_quant(avctx, pic, (mb + 1) * TRELLIS_WIDTH, x, y,
-                                     mbs_per_slice);
-            }
-            for (x = ctx->slices_width - 1; x >= 0; x--) {
-                ctx->slice_q[x] = ctx->nodes[q].quant;
-                q = ctx->nodes[q].prev_node;
-            }
-        }
+    if (!ctx->force_quant) {
+        ret = avctx->execute2(avctx, find_quant_thread, NULL, NULL,
+                              ctx->mb_height);
+        if (ret)
+            return ret;
+    }
 
+    for (y = 0; y < ctx->mb_height; y++) {
         mbs_per_slice = ctx->mbs_per_slice;
         for (x = mb = 0; x < ctx->mb_width; x += mbs_per_slice, mb++) {
-            q = ctx->force_quant ? ctx->force_quant : ctx->slice_q[mb];
+            q = ctx->force_quant ? ctx->force_quant
+                                 : ctx->slice_q[mb + y * ctx->slices_width];
             while (ctx->mb_width - x < mbs_per_slice)
                 mbs_per_slice >>= 1;
@@ -807,13 +833,18 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 static av_cold int encode_close(AVCodecContext *avctx)
 {
     ProresContext *ctx = avctx->priv_data;
+    int i;
 
     if (avctx->coded_frame->data[0])
         avctx->release_buffer(avctx, avctx->coded_frame);
 
     av_freep(&avctx->coded_frame);
 
-    av_freep(&ctx->nodes);
+    if (ctx->tdata) {
+        for (i = 0; i < avctx->thread_count; i++)
+            av_free(ctx->tdata[i].nodes);
+    }
+    av_freep(&ctx->tdata);
     av_freep(&ctx->slice_q);
 
     return 0;
...@@ -883,23 +914,32 @@ static av_cold int encode_init(AVCodecContext *avctx) ...@@ -883,23 +914,32 @@ static av_cold int encode_init(AVCodecContext *avctx)
ctx->quants[i][j] = ctx->quant_mat[j] * i; ctx->quants[i][j] = ctx->quant_mat[j] * i;
} }
ctx->nodes = av_malloc((ctx->slices_width + 1) * TRELLIS_WIDTH ctx->slice_q = av_malloc(ctx->num_slices * sizeof(*ctx->slice_q));
* sizeof(*ctx->nodes)); if (!ctx->slice_q) {
if (!ctx->nodes) {
encode_close(avctx); encode_close(avctx);
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} }
for (i = min_quant; i < max_quant + 2; i++) {
ctx->nodes[i].prev_node = -1; ctx->tdata = av_mallocz(avctx->thread_count * sizeof(*ctx->tdata));
ctx->nodes[i].bits = 0; if (!ctx->tdata) {
ctx->nodes[i].score = 0; encode_close(avctx);
return AVERROR(ENOMEM);
} }
ctx->slice_q = av_malloc(ctx->slices_width * sizeof(*ctx->slice_q)); for (j = 0; j < avctx->thread_count; j++) {
if (!ctx->slice_q) { ctx->tdata[j].nodes = av_malloc((ctx->slices_width + 1)
* TRELLIS_WIDTH
* sizeof(*ctx->tdata->nodes));
if (!ctx->tdata[j].nodes) {
encode_close(avctx); encode_close(avctx);
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} }
for (i = min_quant; i < max_quant + 2; i++) {
ctx->tdata[j].nodes[i].prev_node = -1;
ctx->tdata[j].nodes[i].bits = 0;
ctx->tdata[j].nodes[i].score = 0;
}
}
} else { } else {
int ls = 0; int ls = 0;
...@@ -987,6 +1027,7 @@ AVCodec ff_prores_kostya_encoder = { ...@@ -987,6 +1027,7 @@ AVCodec ff_prores_kostya_encoder = {
.init = encode_init, .init = encode_init,
.close = encode_close, .close = encode_close,
.encode2 = encode_frame, .encode2 = encode_frame,
.capabilities = CODEC_CAP_SLICE_THREADS,
.long_name = NULL_IF_CONFIG_SMALL("Apple ProRes (iCodec Pro)"), .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes (iCodec Pro)"),
.pix_fmts = (const enum PixelFormat[]) { .pix_fmts = (const enum PixelFormat[]) {
PIX_FMT_YUV422P10, PIX_FMT_YUV444P10, PIX_FMT_NONE PIX_FMT_YUV422P10, PIX_FMT_YUV444P10, PIX_FMT_NONE
......
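Note on the threading change above: each worker thread gets its own ProresThreadData with a private trellis node buffer, one job per macroblock row is dispatched through avctx->execute2(), and results are published into the shared ctx->slice_q, now indexed per row. The stand-alone sketch below mirrors only that dispatch shape; EncContext, ThreadData and quant_job are hypothetical stand-ins, and a serial loop replaces the real slice-thread pool, so this is an illustration of the pattern rather than the encoder's code.

/* Hypothetical stand-in types; in the patch these roles are played by
 * AVCodecContext, ProresContext and ProresThreadData. */
#include <stdio.h>
#include <stdlib.h>

#define NUM_THREADS 4
#define NUM_ROWS    8   /* one job per macroblock row, as in find_quant_thread() */

typedef struct {
    int scratch;              /* stands in for the per-thread trellis nodes */
} ThreadData;

typedef struct {
    ThreadData *tdata;        /* one entry per worker thread */
    int        *slice_q;      /* shared result, indexed by jobnr (slice row) */
} EncContext;

/* Same shape as find_quant_thread(): jobnr selects the row, threadnr selects
 * the private scratch buffer, and only slice_q entries belonging to this row
 * are written, so workers never touch each other's state. */
static int quant_job(EncContext *ctx, int jobnr, int threadnr)
{
    ThreadData *td      = &ctx->tdata[threadnr];
    td->scratch         = jobnr * 2;        /* pretend quantiser search */
    ctx->slice_q[jobnr] = td->scratch;      /* publish the per-row result */
    return 0;
}

int main(void)
{
    EncContext ctx;
    ctx.tdata   = calloc(NUM_THREADS, sizeof(*ctx.tdata));
    ctx.slice_q = calloc(NUM_ROWS, sizeof(*ctx.slice_q));

    /* avctx->execute2() would hand out jobnr 0..NUM_ROWS-1 to worker threads;
     * a serial loop gives the same result here. */
    for (int job = 0; job < NUM_ROWS; job++)
        quant_job(&ctx, job, job % NUM_THREADS);

    for (int job = 0; job < NUM_ROWS; job++)
        printf("row %d -> quant %d\n", job, ctx.slice_q[job]);

    free(ctx.tdata);
    free(ctx.slice_q);
    return 0;
}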
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include "libavutil/avstring.h" #include "libavutil/avstring.h"
#include "libavutil/intfloat.h" #include "libavutil/intfloat.h"
#include "libavutil/lfg.h" #include "libavutil/lfg.h"
#include "libavutil/opt.h"
#include "libavutil/sha.h" #include "libavutil/sha.h"
#include "avformat.h" #include "avformat.h"
#include "internal.h" #include "internal.h"
...@@ -41,6 +42,9 @@ ...@@ -41,6 +42,9 @@
//#define DEBUG //#define DEBUG
#define APP_MAX_LENGTH 128
#define PLAYPATH_MAX_LENGTH 256
/** RTMP protocol handler state */ /** RTMP protocol handler state */
typedef enum { typedef enum {
STATE_START, ///< client has not done anything yet STATE_START, ///< client has not done anything yet
...@@ -56,12 +60,13 @@ typedef enum { ...@@ -56,12 +60,13 @@ typedef enum {
/** protocol handler context */ /** protocol handler context */
typedef struct RTMPContext { typedef struct RTMPContext {
const AVClass *class;
URLContext* stream; ///< TCP stream used in interactions with RTMP server URLContext* stream; ///< TCP stream used in interactions with RTMP server
RTMPPacket prev_pkt[2][RTMP_CHANNELS]; ///< packet history used when reading and sending packets RTMPPacket prev_pkt[2][RTMP_CHANNELS]; ///< packet history used when reading and sending packets
int chunk_size; ///< size of the chunks RTMP packets are divided into int chunk_size; ///< size of the chunks RTMP packets are divided into
int is_input; ///< input/output flag int is_input; ///< input/output flag
char playpath[256]; ///< path to filename to play (with possible "mp4:" prefix) char *playpath; ///< stream identifier to play (with possible "mp4:" prefix)
char app[128]; ///< application char *app; ///< name of application
ClientState state; ///< current state ClientState state; ///< current state
int main_channel_id; ///< an additional channel ID which is used for some invocations int main_channel_id; ///< an additional channel ID which is used for some invocations
uint8_t* flv_data; ///< buffer with data for demuxer uint8_t* flv_data; ///< buffer with data for demuxer
...@@ -822,6 +827,7 @@ static int rtmp_open(URLContext *s, const char *uri, int flags) ...@@ -822,6 +827,7 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
{ {
RTMPContext *rt = s->priv_data; RTMPContext *rt = s->priv_data;
char proto[8], hostname[256], path[1024], *fname; char proto[8], hostname[256], path[1024], *fname;
char *old_app;
uint8_t buf[2048]; uint8_t buf[2048];
int port; int port;
int ret; int ret;
...@@ -847,6 +853,16 @@ static int rtmp_open(URLContext *s, const char *uri, int flags) ...@@ -847,6 +853,16 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
rt->chunk_size = 128; rt->chunk_size = 128;
rt->state = STATE_HANDSHAKED; rt->state = STATE_HANDSHAKED;
// Keep the application name when it has been defined by the user.
old_app = rt->app;
rt->app = av_malloc(APP_MAX_LENGTH);
if (!rt->app) {
rtmp_close(s);
return AVERROR(ENOMEM);
}
//extract "app" part from path //extract "app" part from path
if (!strncmp(path, "/ondemand/", 10)) { if (!strncmp(path, "/ondemand/", 10)) {
fname = path + 10; fname = path + 10;
...@@ -868,6 +884,20 @@ static int rtmp_open(URLContext *s, const char *uri, int flags) ...@@ -868,6 +884,20 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
} }
} }
} }
if (old_app) {
// The name of application has been defined by the user, override it.
av_free(rt->app);
rt->app = old_app;
}
if (!rt->playpath) {
rt->playpath = av_malloc(PLAYPATH_MAX_LENGTH);
if (!rt->playpath) {
rtmp_close(s);
return AVERROR(ENOMEM);
}
if (!strchr(fname, ':') && if (!strchr(fname, ':') &&
(!strcmp(fname + strlen(fname) - 4, ".f4v") || (!strcmp(fname + strlen(fname) - 4, ".f4v") ||
!strcmp(fname + strlen(fname) - 4, ".mp4"))) { !strcmp(fname + strlen(fname) - 4, ".mp4"))) {
...@@ -875,7 +905,8 @@ static int rtmp_open(URLContext *s, const char *uri, int flags) ...@@ -875,7 +905,8 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
} else { } else {
rt->playpath[0] = 0; rt->playpath[0] = 0;
} }
strncat(rt->playpath, fname, sizeof(rt->playpath) - 5); strncat(rt->playpath, fname, PLAYPATH_MAX_LENGTH - 5);
}
rt->client_report_size = 1048576; rt->client_report_size = 1048576;
rt->bytes_read = 0; rt->bytes_read = 0;
...@@ -1013,6 +1044,23 @@ static int rtmp_write(URLContext *s, const uint8_t *buf, int size) ...@@ -1013,6 +1044,23 @@ static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
return size; return size;
} }
#define OFFSET(x) offsetof(RTMPContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
#define ENC AV_OPT_FLAG_ENCODING_PARAM
static const AVOption rtmp_options[] = {
{"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{ NULL },
};
static const AVClass rtmp_class = {
.class_name = "rtmp",
.item_name = av_default_item_name,
.option = rtmp_options,
.version = LIBAVUTIL_VERSION_INT,
};
URLProtocol ff_rtmp_protocol = { URLProtocol ff_rtmp_protocol = {
.name = "rtmp", .name = "rtmp",
.url_open = rtmp_open, .url_open = rtmp_open,
...@@ -1021,4 +1069,5 @@ URLProtocol ff_rtmp_protocol = { ...@@ -1021,4 +1069,5 @@ URLProtocol ff_rtmp_protocol = {
.url_close = rtmp_close, .url_close = rtmp_close,
.priv_data_size = sizeof(RTMPContext), .priv_data_size = sizeof(RTMPContext),
.flags = URL_PROTOCOL_FLAG_NETWORK, .flags = URL_PROTOCOL_FLAG_NETWORK,
.priv_data_class= &rtmp_class,
}; };
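Usage note for the two options added above: because rtmp_app and rtmp_playpath live on the protocol's private AVClass, they can be supplied through the dictionary passed to avio_open2(), which is documented to carry protocol-private options (tools may also expose them on the command line, depending on how they forward options). A minimal sketch, assuming an RTMP server at example.com and a build that includes the rtmp protocol; error handling trimmed:

/* Sketch only: hypothetical helper. Call av_register_all() once before use. */
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/dict.h>

static int open_rtmp(AVIOContext **pb)
{
    AVDictionary *opts = NULL;
    int ret;

    /* Override the app and playpath that would otherwise be parsed from the URL. */
    av_dict_set(&opts, "rtmp_app",      "live",     0);
    av_dict_set(&opts, "rtmp_playpath", "mystream", 0);

    ret = avio_open2(pb, "rtmp://example.com/live/mystream",
                     AVIO_FLAG_READ, NULL, &opts);

    av_dict_free(&opts);  /* options not consumed by the protocol remain here */
    return ret;
}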
...@@ -22,7 +22,26 @@ ...@@ -22,7 +22,26 @@
#include "avutil.h" #include "avutil.h"
/** /**
* all in native-endian format * Audio Sample Formats
*
* @par
* The data described by the sample format is always in native-endian order.
* Sample values can be expressed by native C types, hence the lack of a signed
* 24-bit sample format even though it is a common raw audio data format.
*
* @par
* The floating-point formats are based on full volume being in the range
* [-1.0, 1.0]. Any values outside this range are beyond full volume level.
*
* @par
* The data layout as used in av_samples_fill_arrays() and elsewhere in Libav
* (such as AVFrame in libavcodec) is as follows:
*
* For planar sample formats, each audio channel is in a separate data plane,
* and linesize is the buffer size, in bytes, for a single plane. All data
* planes must be the same size. For packed sample formats, only the first data
* plane is used, and samples for each channel are interleaved. In this case,
* linesize is the buffer size, in bytes, for the 1 plane.
*/ */
enum AVSampleFormat { enum AVSampleFormat {
AV_SAMPLE_FMT_NONE = -1, AV_SAMPLE_FMT_NONE = -1,
...@@ -147,6 +166,9 @@ int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, ...@@ -147,6 +166,9 @@ int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
* buffer for planar layout, or the aligned size of the buffer for all channels * buffer for planar layout, or the aligned size of the buffer for all channels
* for packed layout. * for packed layout.
* *
* @see enum AVSampleFormat
* The documentation for AVSampleFormat describes the data layout.
*
* @param[out] audio_data array to be filled with the pointer for each channel * @param[out] audio_data array to be filled with the pointer for each channel
* @param[out] linesize calculated linesize, may be NULL * @param[out] linesize calculated linesize, may be NULL
* @param buf the pointer to a buffer containing the samples * @param buf the pointer to a buffer containing the samples
...@@ -165,6 +187,9 @@ int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, uint8_t *buf, ...@@ -165,6 +187,9 @@ int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, uint8_t *buf,
* linesize accordingly. * linesize accordingly.
* The allocated samples buffer can be freed by using av_freep(&audio_data[0]) * The allocated samples buffer can be freed by using av_freep(&audio_data[0])
* *
* @see enum AVSampleFormat
* The documentation for AVSampleFormat describes the data layout.
*
* @param[out] audio_data array to be filled with the pointer for each channel * @param[out] audio_data array to be filled with the pointer for each channel
* @param[out] linesize aligned size for audio buffer(s), may be NULL * @param[out] linesize aligned size for audio buffer(s), may be NULL
* @param nb_channels number of audio channels * @param nb_channels number of audio channels
......
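To make the planar/packed distinction documented above concrete, the sketch below allocates the same 1024 stereo S16 samples in both layouts and prints the resulting linesize: for the packed format linesize covers all channels of the single interleaved plane, for the planar format it is the size of each per-channel plane. This is only an illustration built on av_samples_alloc() with the default (0) alignment; it is not part of the patch.

/* Sketch: contrasts packed vs. planar layout as described above. */
#include <stdio.h>
#include <libavutil/samplefmt.h>
#include <libavutil/mem.h>

int main(void)
{
    uint8_t *packed[1] = { NULL };
    uint8_t *planar[2] = { NULL, NULL };
    int linesize, nb_channels = 2, nb_samples = 1024;

    /* Packed S16: one interleaved plane, linesize spans all channels
     * (here 1024 samples * 2 channels * 2 bytes = 4096). */
    av_samples_alloc(packed, &linesize, nb_channels, nb_samples,
                     AV_SAMPLE_FMT_S16, 0);
    printf("packed linesize: %d bytes\n", linesize);
    av_freep(&packed[0]);

    /* Planar S16: one plane per channel, linesize is per plane
     * (here 1024 samples * 2 bytes = 2048). */
    av_samples_alloc(planar, &linesize, nb_channels, nb_samples,
                     AV_SAMPLE_FMT_S16P, 0);
    printf("planar linesize: %d bytes\n", linesize);
    av_freep(&planar[0]);   /* frees the whole buffer for all planes */

    return 0;
}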