Commit 484e59a0 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  avs: call release_buffer() at the end.
  Add minor bumps and APIchanges entries for lavc/lavfi changes.
  mpegvideo.c: K&R formatting and cosmetics.
  avconv: avoid memcpy in vsrc_buffer when possible.
  avconv: implement get_buffer()/release_buffer().
  lavfi: add a new function av_buffersrc_buffer().
  lavfi: add avfilter_copy_frame_props()
  lavc: add format field to AVFrame
  lavc: add width and height fields to AVFrame
  lavc: add a sample_aspect_ratio field to AVFrame
  doxy: add website-alike style to the html output
  FAQ: add an entry for common error when using -profile

Conflicts:
	avconv.c
	cmdutils.c
	doc/APIchanges
	libavcodec/avcodec.h
	libavcodec/mpegvideo.c
	libavcodec/utils.c
	libavcodec/version.h
	libavfilter/Makefile
	libavfilter/avfilter.c
	libavfilter/avfilter.h
	libavfilter/src_movie.c
	libavfilter/vsrc_buffer.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents c48f67f0 80dc7c01
@@ -33,6 +33,12 @@ PROJECT_NAME = FFmpeg
PROJECT_NUMBER =
# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
# pixels and the maximum width should not exceed 200 pixels. Doxygen will
# copy the logo to the output directory.
PROJECT_LOGO =
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.
# If a relative path is entered, it will be relative to the location
@@ -761,7 +767,7 @@ ALPHABETICAL_INDEX = YES
# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
# in which this list will be split (can be a number in the range [1..20])
COLS_IN_ALPHA_INDEX = 5
COLS_IN_ALPHA_INDEX = 2
# In case all classes in a project start with a common prefix, all
# classes will be put under the same header in the alphabetical index.
@@ -795,13 +801,13 @@ HTML_FILE_EXTENSION = .html
# each generated HTML page. If it is left blank doxygen will generate a
# standard header.
HTML_HEADER =
HTML_HEADER = doc/doxy/header.html
# The HTML_FOOTER tag can be used to specify a personal HTML footer for
# each generated HTML page. If it is left blank doxygen will generate a
# standard footer.
HTML_FOOTER =
HTML_FOOTER = doc/doxy/footer.html
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to
@@ -810,7 +816,7 @@ HTML_FOOTER =
# the style sheet file to the HTML output directory, so don't put your own
# stylesheet in the HTML output directory as well, or it will be erased!
HTML_STYLESHEET =
HTML_STYLESHEET = doc/doxy/doxy_stylesheet.css
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
# Doxygen will adjust the colors in the stylesheet and background images
......
@@ -44,6 +44,7 @@
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
#include "libavutil/libm.h"
#include "libavutil/imgutils.h"
#include "libavformat/os_support.h"
#include "libavformat/ffm.h" // not public API
@@ -53,6 +54,7 @@
# include "libavfilter/avfilter.h"
# include "libavfilter/avfiltergraph.h"
# include "libavfilter/buffersink.h"
# include "libavfilter/buffersrc.h"
# include "libavfilter/vsrc_buffer.h"
#endif
@@ -157,6 +159,19 @@ static unsigned int allocated_audio_out_size, allocated_audio_buf_size;
#define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
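/* A FrameBuffer describes one decoded-picture allocation. Buffers are
 * reference-counted and, once no longer referenced, are chained through
 * 'next' into the owning InputStream's pool of free buffers for reuse. */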
typedef struct FrameBuffer {
uint8_t *base[4];
uint8_t *data[4];
int linesize[4];
int h, w;
enum PixelFormat pix_fmt;
int refcount;
struct InputStream *ist;
struct FrameBuffer *next;
} FrameBuffer;
typedef struct InputStream {
int file_index;
AVStream *st;
@@ -174,6 +189,9 @@ typedef struct InputStream {
int is_start; /* is 1 at the start and after a discontinuity */
int showed_multi_packet_warning;
AVDictionary *opts;
/* a pool of free buffers for decoded data */
FrameBuffer *buffer_pool;
} InputStream;
typedef struct InputFile {
@@ -416,6 +434,131 @@ static void reset_options(OptionsContext *o)
init_opts();
}
static int alloc_buffer(InputStream *ist, FrameBuffer **pbuf)
{
AVCodecContext *s = ist->st->codec;
FrameBuffer *buf = av_mallocz(sizeof(*buf));
int ret;
const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
int h_chroma_shift, v_chroma_shift;
int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
int w = s->width, h = s->height;
if (!buf)
return AVERROR(ENOMEM);
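/* Unless the decoder handles edge emulation itself, pad the picture so
 * that motion vectors may safely point outside the visible area. */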
if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
w += 2*edge;
h += 2*edge;
}
avcodec_align_dimensions(s, &w, &h);
if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
s->pix_fmt, 32)) < 0) {
av_freep(&buf);
return ret;
}
/* XXX this shouldn't be needed, but some tests break without this line
* those decoders are buggy and need to be fixed.
* the following tests fail:
* bethsoft-vid, cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
*/
memset(buf->base[0], 128, ret);
avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
for (int i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
const int h_shift = i==0 ? 0 : h_chroma_shift;
const int v_shift = i==0 ? 0 : v_chroma_shift;
if (s->flags & CODEC_FLAG_EMU_EDGE)
buf->data[i] = buf->base[i];
else
buf->data[i] = buf->base[i] +
FFALIGN((buf->linesize[i]*edge >> v_shift) +
(pixel_size*edge >> h_shift), 32);
}
buf->w = s->width;
buf->h = s->height;
buf->pix_fmt = s->pix_fmt;
buf->ist = ist;
*pbuf = buf;
return 0;
}
static void free_buffer_pool(InputStream *ist)
{
FrameBuffer *buf = ist->buffer_pool;
while (buf) {
ist->buffer_pool = buf->next;
av_freep(&buf->base[0]);
av_free(buf);
buf = ist->buffer_pool;
}
}
static void unref_buffer(InputStream *ist, FrameBuffer *buf)
{
av_assert0(buf->refcount);
buf->refcount--;
if (!buf->refcount) {
buf->next = ist->buffer_pool;
ist->buffer_pool = buf;
}
}
static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
{
InputStream *ist = s->opaque;
FrameBuffer *buf;
int ret, i;
if (!ist->buffer_pool && (ret = alloc_buffer(ist, &ist->buffer_pool)) < 0)
return ret;
buf = ist->buffer_pool;
ist->buffer_pool = buf->next;
buf->next = NULL;
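/* A pooled buffer may have been allocated for different stream
 * parameters; if so, drop it and allocate a matching one. */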
if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
av_freep(&buf->base[0]);
av_free(buf);
if ((ret = alloc_buffer(ist, &buf)) < 0)
return ret;
}
buf->refcount++;
frame->opaque = buf;
frame->type = FF_BUFFER_TYPE_USER;
frame->extended_data = frame->data;
frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
frame->data[i] = buf->data[i];
frame->linesize[i] = buf->linesize[i];
}
return 0;
}
static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
{
InputStream *ist = s->opaque;
FrameBuffer *buf = frame->opaque;
int i;
for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
frame->data[i] = NULL;
unref_buffer(ist, buf);
}
static void filter_release_buffer(AVFilterBuffer *fb)
{
FrameBuffer *buf = fb->priv;
av_free(fb);
unref_buffer(buf->ist, buf);
}
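/* Buffer lifecycle: codec_get_buffer() hands a pooled (or freshly
 * allocated) FrameBuffer to the decoder, codec_release_buffer() and
 * filter_release_buffer() drop references, unref_buffer() returns the
 * buffer to the pool once its refcount reaches zero, and
 * free_buffer_pool() releases everything on exit. */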
#if CONFIG_AVFILTER
static int configure_video_filters(InputStream *ist, OutputStream *ost)
@@ -618,6 +761,7 @@ void exit_program(int ret)
av_freep(&input_streams[i].decoded_frame);
av_freep(&input_streams[i].filtered_frame);
av_dict_free(&input_streams[i].opts);
free_buffer_pool(&input_streams[i]);
}
if (vstats_file)
@@ -1855,20 +1999,35 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
continue;
#if CONFIG_AVFILTER
if (ost->input_video_filter) {
if (!decoded_frame->sample_aspect_ratio.num)
decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
decoded_frame->pts = ist->pts;
if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
FrameBuffer *buf = decoded_frame->opaque;
AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
decoded_frame->data, decoded_frame->linesize,
AV_PERM_READ | AV_PERM_PRESERVE,
ist->st->codec->width, ist->st->codec->height,
ist->st->codec->pix_fmt);
avfilter_copy_frame_props(fb, decoded_frame);
fb->pts = ist->pts;
fb->buf->priv = buf;
fb->buf->free = filter_release_buffer;
buf->refcount++;
av_buffersrc_buffer(ost->input_video_filter, fb);
} else
av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE);
if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
av_free(buffer_to_free);
return AVERROR(ENOMEM);
} else
avcodec_get_frame_defaults(ist->filtered_frame);
filtered_frame = ist->filtered_frame;
frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
}
while (frame_available) {
if (ost->output_video_filter) {
AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
@@ -2055,6 +2214,12 @@ static int init_input_stream(int ist_index, OutputStream *output_streams, int nb
return AVERROR(EINVAL);
}
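/* Install the pool-backed callbacks on DR1-capable video decoders;
 * 'opaque' carries the InputStream so the callbacks can find its pool. */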
if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
ist->st->codec->get_buffer = codec_get_buffer;
ist->st->codec->release_buffer = codec_release_buffer;
ist->st->codec->opaque = ist;
}
if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
ist->file_index, ist->st->index);
......
/* The standard CSS for doxygen */
/* @group Heading Levels */
h1 {
font-size: 150%;
}
.title {
font-weight: bold;
margin: 10px 2px;
background-color: #7BB37B;
border: 1px solid #6A996A;
color: #151515;
font-size: 1.2em;
padding-bottom: 0.2em;
padding-left: 0.4em;
padding-top: 0.2em;
}
h2 {
font-size: 120%;
}
h3 {
font-size: 100%;
}
dt {
font-weight: bold;
}
div.multicol {
-moz-column-gap: 1em;
-webkit-column-gap: 1em;
-moz-column-count: 3;
-webkit-column-count: 3;
}
p.startli, p.startdd, p.starttd {
margin-top: 2px;
}
p.endli {
margin-bottom: 0px;
}
p.enddd {
margin-bottom: 4px;
}
p.endtd {
margin-bottom: 2px;
}
/* @end */
caption {
font-weight: bold;
}
span.legend {
font-size: 70%;
text-align: center;
}
h3.version {
font-size: 90%;
text-align: center;
}
div.qindex, div.navtab{
background-color: #EBF6EB;
border: 1px solid #A3D7A3;
text-align: center;
}
div.qindex, div.navpath {
width: 100%;
line-height: 140%;
}
div.navtab {
margin-right: 15px;
}
/* @group Link Styling */
a {
color: #3D8C3D;
font-weight: normal;
text-decoration: none;
}
.contents a:visited {
color: #46A246;
}
a:hover {
text-decoration: underline;
}
a.qindex {
font-weight: bold;
}
a.qindexHL {
font-weight: bold;
background-color: #9CD49C;
color: #ffffff;
border: 1px double #86CA86;
}
.contents a.qindexHL:visited {
color: #ffffff;
}
a.el {
font-weight: bold;
}
a.elRef {
}
a.code {
color: #4665A2;
}
a.codeRef {
color: #4665A2;
}
/* @end */
dl.el {
margin-left: -1cm;
}
.fragment {
font-family: monospace, fixed;
font-size: 105%;
}
pre.fragment {
border: 1px solid #C4E5C4;
background-color: #FBFDFB;
padding: 4px 6px;
margin: 4px 8px 4px 2px;
overflow: auto;
word-wrap: break-word;
font-size: 9pt;
line-height: 125%;
}
div.ah {
background-color: black;
font-weight: bold;
color: #ffffff;
margin-bottom: 3px;
margin-top: 3px;
padding: 0.2em;
border: solid thin #333;
border-radius: 0.5em;
-webkit-border-radius: .5em;
-moz-border-radius: .5em;
box-shadow: 2px 2px 3px #999;
-webkit-box-shadow: 2px 2px 3px #999;
-moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px;
background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444));
background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000);
}
div.groupHeader {
margin-left: 16px;
margin-top: 12px;
font-weight: bold;
}
div.groupText {
margin-left: 16px;
font-style: italic;
}
div.contents {
margin-top: 10px;
margin-left: 8px;
margin-right: 8px;
}
td.indexkey {
background-color: #EBF6EB;
font-weight: bold;
border: 1px solid #C4E5C4;
margin: 2px 0px 2px 0;
padding: 2px 10px;
white-space: nowrap;
vertical-align: top;
}
td.indexvalue {
background-color: #EBF6EB;
border: 1px solid #C4E5C4;
padding: 2px 10px;
margin: 2px 0px;
}
tr.memlist {
background-color: #EEF7EE;
}
p.formulaDsp {
text-align: center;
}
img.formulaDsp {
}
img.formulaInl {
vertical-align: middle;
}
div.center {
text-align: center;
margin-top: 0px;
margin-bottom: 0px;
padding: 0px;
}
div.center img {
border: 0px;
}
#footer {
margin: -10px 1em 0;
padding-top: 20px;
}
address.footer {
background-color: #ffffff;
text-align: center;
}
img.footer {
border: 0px;
vertical-align: middle;
}
/* @group Code Colorization */
span.keyword {
color: #008000
}
span.keywordtype {
color: #604020
}
span.keywordflow {
color: #e08000
}
span.comment {
color: #800000
}
span.preprocessor {
color: #806020
}
span.stringliteral {
color: #002080
}
span.charliteral {
color: #008080
}
span.vhdldigit {
color: #ff00ff
}
span.vhdlchar {
color: #000000
}
span.vhdlkeyword {
color: #700070
}
span.vhdllogic {
color: #ff0000
}
/* @end */
/*
.search {
color: #003399;
font-weight: bold;
}
form.search {
margin-bottom: 0px;
margin-top: 0px;
}
input.search {
font-size: 75%;
color: #000080;
font-weight: normal;
background-color: #e8eef2;
}
*/
td.tiny {
font-size: 75%;
}
.dirtab {
padding: 4px;
border-collapse: collapse;
border: 1px solid #A3D7A3;
}
th.dirtab {
background: #EBF6EB;
font-weight: bold;
}
hr {
height: 0px;
border: none;
border-top: 1px solid #4AAA4A;
}
hr.footer {
height: 1px;
}
/* @group Member Descriptions */
table.memberdecls {
border-spacing: 0px;
padding: 0px;
}
.mdescLeft, .mdescRight,
.memItemLeft, .memItemRight,
.memTemplItemLeft, .memTemplItemRight, .memTemplParams {
background-color: #F9FCF9;
border: none;
margin: 4px;
padding: 1px 0 0 8px;
}
.mdescLeft, .mdescRight {
padding: 0px 8px 4px 8px;
color: #555;
}
.memItemLeft, .memItemRight, .memTemplParams {
border-top: 1px solid #C4E5C4;
}
.memItemLeft, .memTemplItemLeft {
white-space: nowrap;
}
.memItemRight {
width: 100%;
}
.memTemplParams {
color: #46A246;
white-space: nowrap;
}
/* @end */
/* @group Member Details */
/* Styles for detailed member documentation */
.memtemplate {
font-size: 80%;
color: #46A246;
font-weight: normal;
margin-left: 9px;
}
.memnav {
background-color: #EBF6EB;
border: 1px solid #A3D7A3;
text-align: center;
margin: 2px;
margin-right: 15px;
padding: 2px;
}
.mempage {
width: 100%;
}
.memitem {
padding: 0;
margin-bottom: 10px;
margin-right: 5px;
}
.memname {
white-space: nowrap;
font-weight: bold;
margin-left: 6px;
}
.memproto, dl.reflist dt {
border-top: 1px solid #A8D9A8;
border-left: 1px solid #A8D9A8;
border-right: 1px solid #A8D9A8;
padding: 6px 0px 6px 0px;
color: #255525;
font-weight: bold;
text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9);
/* opera specific markup */
box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
border-top-right-radius: 8px;
border-top-left-radius: 8px;
/* firefox specific markup */
-moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px;
-moz-border-radius-topright: 8px;
-moz-border-radius-topleft: 8px;
/* webkit specific markup */
-webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
-webkit-border-top-right-radius: 8px;
-webkit-border-top-left-radius: 8px;
background-image:url('nav_f.png');
background-repeat:repeat-x;
background-color: #E2F2E2;
}
.memdoc, dl.reflist dd {
border-bottom: 1px solid #A8D9A8;
border-left: 1px solid #A8D9A8;
border-right: 1px solid #A8D9A8;
padding: 2px 5px;
background-color: #FBFDFB;
border-top-width: 0;
/* opera specific markup */
border-bottom-left-radius: 8px;
border-bottom-right-radius: 8px;
box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
/* firefox specific markup */
-moz-border-radius-bottomleft: 8px;
-moz-border-radius-bottomright: 8px;
-moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px;
background-image: -moz-linear-gradient(center top, #FFFFFF 0%, #FFFFFF 60%, #F7FBF7 95%, #EEF7EE);
/* webkit specific markup */
-webkit-border-bottom-left-radius: 8px;
-webkit-border-bottom-right-radius: 8px;
-webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
background-image: -webkit-gradient(linear,center top,center bottom,from(#FFFFFF), color-stop(0.6,#FFFFFF), color-stop(0.60,#FFFFFF), color-stop(0.95,#F7FBF7), to(#EEF7EE));
}
dl.reflist dt {
padding: 5px;
}
dl.reflist dd {
margin: 0px 0px 10px 0px;
padding: 5px;
}
.paramkey {
text-align: right;
}
.paramtype {
white-space: nowrap;
}
.paramname {
color: #602020;
white-space: nowrap;
}
.paramname em {
font-style: normal;
}
.params, .retval, .exception, .tparams {
border-spacing: 6px 2px;
}
.params .paramname, .retval .paramname {
font-weight: bold;
vertical-align: top;
}
.params .paramtype {
font-style: italic;
vertical-align: top;
}
.params .paramdir {
font-family: "courier new",courier,monospace;
vertical-align: top;
}
/* @end */
/* @group Directory (tree) */
/* for the tree view */
.ftvtree {
font-family: sans-serif;
margin: 0px;
}
/* these are for tree view when used as main index */
.directory {
font-size: 9pt;
font-weight: bold;
margin: 5px;
}
.directory h3 {
margin: 0px;
margin-top: 1em;
font-size: 11pt;
}
/*
The following two styles can be used to replace the root node title
with an image of your choice. Simply uncomment the next two styles,
specify the name of your image and be sure to set 'height' to the
proper pixel height of your image.
*/
/*
.directory h3.swap {
height: 61px;
background-repeat: no-repeat;
background-image: url("yourimage.gif");
}
.directory h3.swap span {
display: none;
}
*/
.directory > h3 {
margin-top: 0;
}
.directory p {
margin: 0px;
white-space: nowrap;
}
.directory div {
display: none;
margin: 0px;
}
.directory img {
vertical-align: -30%;
}
/* these are for tree view when not used as main index */
.directory-alt {
font-size: 100%;
font-weight: bold;
}
.directory-alt h3 {
margin: 0px;
margin-top: 1em;
font-size: 11pt;
}
.directory-alt > h3 {
margin-top: 0;
}
.directory-alt p {
margin: 0px;
white-space: nowrap;
}
.directory-alt div {
display: none;
margin: 0px;
}
.directory-alt img {
vertical-align: -30%;
}
/* @end */
div.dynheader {
margin-top: 8px;
}
address {
font-style: normal;
color: #2A612A;
}
table.doxtable {
border-collapse:collapse;
}
table.doxtable td, table.doxtable th {
border: 1px solid #2D682D;
padding: 3px 7px 2px;
}
table.doxtable th {
background-color: #377F37;
color: #FFFFFF;
font-size: 110%;
padding-bottom: 4px;
padding-top: 5px;
text-align:left;
}
table.fieldtable {
width: 100%;
margin-bottom: 10px;
border: 1px solid #A8D9A8;
border-spacing: 0px;
-moz-border-radius: 4px;
-webkit-border-radius: 4px;
border-radius: 4px;
-moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px;
-webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15);
box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15);
}
.fieldtable td, .fieldtable th {
padding: 3px 7px 2px;
}
.fieldtable td.fieldtype, .fieldtable td.fieldname {
white-space: nowrap;
border-right: 1px solid #A8D9A8;
border-bottom: 1px solid #A8D9A8;
vertical-align: top;
}
.fieldtable td.fielddoc {
border-bottom: 1px solid #A8D9A8;
width: 100%;
}
.fieldtable tr:last-child td {
border-bottom: none;
}
.fieldtable th {
background-image:url('nav_f.png');
background-repeat:repeat-x;
background-color: #E2F2E2;
font-size: 90%;
color: #255525;
padding-bottom: 4px;
padding-top: 5px;
text-align:left;
-moz-border-radius-topleft: 4px;
-moz-border-radius-topright: 4px;
-webkit-border-top-left-radius: 4px;
-webkit-border-top-right-radius: 4px;
border-top-left-radius: 4px;
border-top-right-radius: 4px;
border-bottom: 1px solid #A8D9A8;
}
.tabsearch {
top: 0px;
left: 10px;
height: 36px;
background-image: url('tab_b.png');
z-index: 101;
overflow: hidden;
font-size: 13px;
}
.navpath ul
{
font-size: 11px;
background-image:url('tab_b.png');
background-repeat:repeat-x;
height:30px;
line-height:30px;
color:#8ACC8A;
border:solid 1px #C2E4C2;
overflow:hidden;
margin:0px;
padding:0px;
}
.navpath li
{
list-style-type:none;
float:left;
padding-left:10px;
padding-right:15px;
background-image:url('bc_s.png');
background-repeat:no-repeat;
background-position:right;
color:#367C36;
}
.navpath li.navelem a
{
height:32px;
display:block;
text-decoration: none;
outline: none;
}
.navpath li.navelem a:hover
{
color:#68BD68;
}
.navpath li.footer
{
list-style-type:none;
float:right;
padding-left:10px;
padding-right:15px;
background-image:none;
background-repeat:no-repeat;
background-position:right;
color:#367C36;
font-size: 8pt;
}
div.summary
{
margin-top: 12px;
text-align: center;
}
div.summary a
{
white-space: nowrap;
}
div.ingroups
{
margin-left: 5px;
font-size: 8pt;
padding-left: 5px;
width: 50%;
text-align: left;
}
div.ingroups a
{
white-space: nowrap;
}
div.headertitle
{
padding: 5px 5px 5px 7px;
}
dl
{
padding: 0 0 0 10px;
}
dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug
{
border-left:4px solid;
padding: 0 0 0 6px;
}
dl.note
{
border-color: #D0C000;
}
dl.warning, dl.attention
{
border-color: #FF0000;
}
dl.pre, dl.post, dl.invariant
{
border-color: #00D000;
}
dl.deprecated
{
border-color: #505050;
}
dl.todo
{
border-color: #00C0E0;
}
dl.test
{
border-color: #3030E0;
}
dl.bug
{
border-color: #C08050;
}
#projectlogo
{
text-align: center;
vertical-align: bottom;
border-collapse: separate;
}
#projectlogo img
{
border: 0px none;
}
#projectname
{
font: 300% Tahoma, Arial,sans-serif;
margin: 0px;
padding: 2px 0px;
}
#projectbrief
{
font: 120% Tahoma, Arial,sans-serif;
margin: 0px;
padding: 0px;
}
#projectnumber
{
font: 50% Tahoma, Arial,sans-serif;
margin: 0px;
padding: 0px;
}
#titlearea
{
padding: 0px;
margin: 0px;
width: 100%;
border-bottom: 1px solid #53B453;
}
.image
{
text-align: center;
}
.dotgraph
{
text-align: center;
}
.mscgraph
{
text-align: center;
}
.caption
{
font-weight: bold;
}
div.zoom
{
border: 1px solid #90CE90;
}
dl.citelist {
margin-bottom:50px;
}
dl.citelist dt {
color:#337533;
float:left;
font-weight:bold;
margin-right:10px;
padding:5px;
}
dl.citelist dd {
margin:2px 0;
padding:5px 0;
}
@media print
{
#top { display: none; }
#side-nav { display: none; }
#nav-path { display: none; }
body { overflow:visible; }
h1, h2, h3, h4, h5, h6 { page-break-after: avoid; }
.summary { display: none; }
.memitem { page-break-inside: avoid; }
#doc-content
{
margin-left:0 !important;
height:auto !important;
width:auto !important;
overflow:inherit;
display:inline;
}
pre.fragment
{
overflow: visible;
text-wrap: unrestricted;
white-space: -moz-pre-wrap; /* Moz */
white-space: -pre-wrap; /* Opera 4-6 */
white-space: -o-pre-wrap; /* Opera 7 */
white-space: pre-wrap; /* CSS3 */
word-wrap: break-word; /* IE 5.5+ */
}
}
/* tabs */
.tabs, .tabs2, .tabs3 {
z-index: 101;
}
.tablist {
margin: auto;
display: table;
}
.tablist li {
float: left;
display: table-cell;
list-style: none;
margin:0 4px;
}
.tablist a {
display: block;
padding: 0 0.3em;
color: #285D28;
text-decoration: none;
outline: none;
}
.tabs3 .tablist a {
padding: 0 10px;
}
/* libav.org stylesheet */
a {
color: #2D6198;
}
a:visited {
color: #884488;
}
h1 a, h2 a, h3 a {
text-decoration: inherit;
color: inherit;
}
#banner, #top {
background-color: #BBC9D8;
border-bottom: 1px solid #7A96B3;
border-top: 1px solid #7A96B3;
position: relative;
text-align: center;
}
#banner img, #top img {
padding-bottom: 1px;
padding-top: 5px;
}
#body {
margin: 0 1em;
}
body {
background-color: #313131;
margin: 0;
}
.center {
margin-left: auto;
margin-right: auto;
text-align: center;
}
#container {
background-color: white;
color: #202020;
margin-left: 1em;
margin-right: 1em;
}
h1 {
background-color: #7BB37B;
border: 1px solid #6A996A;
color: #151515;
font-size: 1.2em;
padding-bottom: 0.2em;
padding-left: 0.4em;
padding-top: 0.2em;
}
h2 {
color: #313131;
font-size: 1.2em;
}
h3 {
color: #313131;
font-size: 0.8em;
margin-bottom: -8px;
}
img {
border: 0;
}
#navrow1 {
margin-top: 12px;
border-top: 1px solid #5C665C;
}
#navrow1, #navrow2, #navrow3, #navrow4 {
background-color: #738073;
border-bottom: 1px solid #5C665C;
border-left: 1px solid #5C665C;
border-right: 1px solid #5C665C;
position: relative;
text-align: center;
}
#navrow1 a, #navrow2 a, #navrow3 a, #navrow4 a {
color: white;
padding: 0.3em;
text-decoration: none;
}
#navrow1 ul, #navrow2 ul, #navrow3 ul, #navrow4 ul {
padding: 0;
}
#navrow1 li.current a, #navrow2 li.current a, #navrow3 li.current a, #navrow4 li.current a {
background-color: #414141;
color: white;
text-decoration: none;
}
#navrow1 a:hover, #navrow2 a:hover, #navrow3 a:hover, #navrow4 a:hover {
background-color: #313131 !important;
color: white;
text-decoration: none;
}
p {
margin-left: 1em;
margin-right: 1em;
}
table {
margin-left: 2em;
}
pre {
margin-left: 2em;
}
#proj_desc {
font-size: 1.2em;
}
#repos {
margin-left: 1em;
margin-right: 1em;
border-collapse: collapse;
border: solid 1px #6A996A;
}
#repos th {
background-color: #7BB37B;
border: solid 1px #6A996A;
}
#repos td {
padding: 0.2em;
border: solid 1px #6A996A;
}
#distro_status {
margin-left: 1em;
margin-right: 1em;
border-collapse: collapse;
border: solid 1px #6A996A;
}
#distro_status th {
background-color: #7BB37B;
border: solid 1px #6A996A;
}
#distro_status td {
padding: 0.2em;
border: solid 1px #6A996A;
}
</div>
<div id="footer">
<!--BEGIN GENERATE_TREEVIEW-->
<li class="footer">$generatedby
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> $doxygenversion </li>
</ul>
<!--END GENERATE_TREEVIEW-->
<!--BEGIN !GENERATE_TREEVIEW-->
<address class="footer"><small>
$generatedby &#160;<a href="http://www.doxygen.org/index.html">
<img class="footer" src="$relpath$doxygen.png" alt="doxygen"/>
</a> $doxygenversion
</small></address>
<!--END !GENERATE_TREEVIEW-->
</div>
</div>
</body>
</html>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath$doxy_stylesheet.css" rel="stylesheet" type="text/css" />
$treeview
$search
$mathjax
</head>
<div id="container">
<div id="body">
<div>
@@ -272,6 +272,26 @@ ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
rm temp[12].[av] all.[av]
@end example
@section -profile option fails when encoding H.264 video with AAC audio
@command{ffmpeg} prints an error like
@example
Undefined constant or missing '(' in 'baseline'
Unable to parse option value "baseline"
Error setting option profile to value baseline.
@end example
Short answer: write @option{-profile:v} instead of @option{-profile}.
Long answer: this happens because the @option{-profile} option can apply to both
video and audio. Specifically, the AAC encoder also defines some profiles, none
of which are named @var{baseline}.
The solution is to apply the @option{-profile} option to the video stream only
by using @url{http://ffmpeg.org/ffmpeg.html#Stream-specifiers-1, Stream specifiers}.
Appending @code{:v} to it will do exactly that.
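For example (the file names and codec choices below are only illustrative):
@example
ffmpeg -i input.mov -vcodec libx264 -profile:v baseline \
    -acodec aac -strict experimental output.mp4
@end example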
@chapter Development
@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?
......
@@ -44,6 +44,7 @@
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
#include "libavutil/libm.h"
#include "libavutil/imgutils.h"
#include "libavformat/os_support.h"
#include "libswresample/swresample.h"
@@ -54,6 +55,7 @@
# include "libavfilter/avfilter.h"
# include "libavfilter/avfiltergraph.h"
# include "libavfilter/buffersink.h"
# include "libavfilter/buffersrc.h"
# include "libavfilter/vsrc_buffer.h"
#endif
@@ -172,6 +174,19 @@ static uint8_t *input_tmp= NULL;
#define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
typedef struct FrameBuffer {
uint8_t *base[4];
uint8_t *data[4];
int linesize[4];
int h, w;
enum PixelFormat pix_fmt;
int refcount;
struct InputStream *ist;
struct FrameBuffer *next;
} FrameBuffer;
typedef struct InputStream {
int file_index;
AVStream *st;
@@ -189,6 +204,9 @@ typedef struct InputStream {
int is_start; /* is 1 at the start and after a discontinuity */
int showed_multi_packet_warning;
AVDictionary *opts;
/* a pool of free buffers for decoded data */
FrameBuffer *buffer_pool;
} InputStream;
typedef struct InputFile {
@@ -441,6 +459,131 @@ static void reset_options(OptionsContext *o, int is_input)
init_opts();
}
static int alloc_buffer(InputStream *ist, FrameBuffer **pbuf)
{
AVCodecContext *s = ist->st->codec;
FrameBuffer *buf = av_mallocz(sizeof(*buf));
int ret;
const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
int h_chroma_shift, v_chroma_shift;
int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
int w = s->width, h = s->height;
if (!buf)
return AVERROR(ENOMEM);
if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
w += 2*edge;
h += 2*edge;
}
avcodec_align_dimensions(s, &w, &h);
if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
s->pix_fmt, 32)) < 0) {
av_freep(&buf);
return ret;
}
/* XXX this shouldn't be needed, but some tests break without this line
* those decoders are buggy and need to be fixed.
* the following tests fail:
* bethsoft-vid, cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
*/
memset(buf->base[0], 128, ret);
avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
for (int i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
const int h_shift = i==0 ? 0 : h_chroma_shift;
const int v_shift = i==0 ? 0 : v_chroma_shift;
if (s->flags & CODEC_FLAG_EMU_EDGE)
buf->data[i] = buf->base[i];
else
buf->data[i] = buf->base[i] +
FFALIGN((buf->linesize[i]*edge >> v_shift) +
(pixel_size*edge >> h_shift), 32);
}
buf->w = s->width;
buf->h = s->height;
buf->pix_fmt = s->pix_fmt;
buf->ist = ist;
*pbuf = buf;
return 0;
}
static void free_buffer_pool(InputStream *ist)
{
FrameBuffer *buf = ist->buffer_pool;
while (buf) {
ist->buffer_pool = buf->next;
av_freep(&buf->base[0]);
av_free(buf);
buf = ist->buffer_pool;
}
}
static void unref_buffer(InputStream *ist, FrameBuffer *buf)
{
av_assert0(buf->refcount);
buf->refcount--;
if (!buf->refcount) {
buf->next = ist->buffer_pool;
ist->buffer_pool = buf;
}
}
static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
{
InputStream *ist = s->opaque;
FrameBuffer *buf;
int ret, i;
if (!ist->buffer_pool && (ret = alloc_buffer(ist, &ist->buffer_pool)) < 0)
return ret;
buf = ist->buffer_pool;
ist->buffer_pool = buf->next;
buf->next = NULL;
if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
av_freep(&buf->base[0]);
av_free(buf);
if ((ret = alloc_buffer(ist, &buf)) < 0)
return ret;
}
buf->refcount++;
frame->opaque = buf;
frame->type = FF_BUFFER_TYPE_USER;
frame->extended_data = frame->data;
frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
frame->data[i] = buf->data[i];
frame->linesize[i] = buf->linesize[i];
}
return 0;
}
static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
{
InputStream *ist = s->opaque;
FrameBuffer *buf = frame->opaque;
int i;
for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
frame->data[i] = NULL;
unref_buffer(ist, buf);
}
static void filter_release_buffer(AVFilterBuffer *fb)
{
FrameBuffer *buf = fb->priv;
av_free(fb);
unref_buffer(buf->ist, buf);
}
#if CONFIG_AVFILTER
static int configure_video_filters(InputStream *ist, OutputStream *ost)
@@ -667,6 +810,7 @@ void av_noreturn exit_program(int ret)
av_freep(&input_streams[i].decoded_frame);
av_freep(&input_streams[i].filtered_frame);
av_dict_free(&input_streams[i].opts);
free_buffer_pool(&input_streams[i]);
}
if (vstats_file)
......
@@ -1285,51 +1285,45 @@ typedef struct AVFrame {
uint8_t **extended_data;
/**
* frame timestamp estimated using various heuristics, in stream time base
* Code outside libavcodec should access this field using:
* av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp");
* sample aspect ratio for the video frame, 0/1 if unknown/unspecified
* - encoding: unused
* - decoding: set by libavcodec, read by user.
* - decoding: Read by user.
*/
int64_t best_effort_timestamp;
AVRational sample_aspect_ratio;
/**
* reordered pos from the last AVPacket that has been input into the decoder
* Code outside libavcodec should access this field using:
* av_opt_ptr(avcodec_get_frame_class(), frame, "pkt_pos");
* width and height of the video frame
* - encoding: unused
* - decoding: Read by user.
*/
int64_t pkt_pos;
int width, height;
/**
* reordered sample aspect ratio for the video frame, 0/1 if unknown/unspecified
* Code outside libavcodec should access this field using:
* av_opt_ptr(avcodec_get_frame_class(), frame, "sample_aspect_ratio");
* format of the frame, -1 if unknown or unset
* Values correspond to enum PixelFormat for video frames,
* enum AVSampleFormat for audio.
* - encoding: unused
* - decoding: Read by user.
*/
AVRational sample_aspect_ratio;
int format;
/**
* width and height of the video frame
* frame timestamp estimated using various heuristics, in stream time base
* Code outside libavcodec should access this field using:
* av_opt_ptr(avcodec_get_frame_class(), frame, "width");
* av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp");
* - encoding: unused
* - decoding: Read by user.
* - decoding: set by libavcodec, read by user.
*/
int width, height;
int64_t best_effort_timestamp;
/**
* format of the frame, -1 if unknown or unset
* It should be cast to the corresponding enum (enum PixelFormat
* for video, enum AVSampleFormat for audio)
* reordered pos from the last AVPacket that has been input into the decoder
* Code outside libavcodec should access this field using:
* av_opt_ptr(avcodec_get_frame_class(), frame, "format");
* av_opt_ptr(avcodec_get_frame_class(), frame, "pkt_pos");
* - encoding: unused
* - decoding: Read by user.
*/
int format;
int64_t pkt_pos;
} AVFrame;
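As a rough usage sketch (not part of this commit), the new public fields can be
read directly from a decoded frame, while best_effort_timestamp is still
reached through the AVOption indirection documented above:

#include <inttypes.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

/* Illustrative only: assumes 'frame' was just filled by a decoder. */
static void dump_frame_props(AVFrame *frame)
{
    int64_t *bet = av_opt_ptr(avcodec_get_frame_class(), frame,
                              "best_effort_timestamp");

    av_log(NULL, AV_LOG_INFO, "%dx%d format:%d sar:%d/%d bet:%"PRId64"\n",
           frame->width, frame->height, frame->format,
           frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
           bet ? *bet : AV_NOPTS_VALUE);
}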
......
@@ -165,6 +165,15 @@ static av_cold int avs_decode_init(AVCodecContext * avctx)
return 0;
}
static av_cold int avs_decode_end(AVCodecContext *avctx)
{
AvsContext *s = avctx->priv_data;
if (s->picture.data[0])
avctx->release_buffer(avctx, &s->picture);
return 0;
}
AVCodec ff_avs_decoder = {
.name = "avs",
.type = AVMEDIA_TYPE_VIDEO,
@@ -172,6 +181,7 @@ AVCodec ff_avs_decoder = {
.priv_data_size = sizeof(AvsContext),
.init = avs_decode_init,
.decode = avs_decode_frame,
.close = avs_decode_end,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("AVS (Audio Video Standard) video"),
};
@@ -1098,25 +1098,30 @@ int ff_find_unused_picture(MpegEncContext *s, int shared)
return -1;
}
static void update_noise_reduction(MpegEncContext *s){
static void update_noise_reduction(MpegEncContext *s)
{
int intra, i;
for(intra=0; intra<2; intra++){
if(s->dct_count[intra] > (1<<16)){
for(i=0; i<64; i++){
s->dct_error_sum[intra][i] >>=1;
for (intra = 0; intra < 2; intra++) {
if (s->dct_count[intra] > (1 << 16)) {
for (i = 0; i < 64; i++) {
s->dct_error_sum[intra][i] >>= 1;
}
s->dct_count[intra] >>= 1;
}
for(i=0; i<64; i++){
s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
for (i = 0; i < 64; i++) {
s->dct_offset[intra][i] = (s->avctx->noise_reduction *
s->dct_count[intra] +
s->dct_error_sum[intra][i] / 2) /
(s->dct_error_sum[intra][i] + 1);
}
}
}
/**
* generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
* generic function for encode/decode called after coding/decoding
* the header and before a frame is coded/decoded.
*/
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
@@ -1124,21 +1129,27 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
Picture *pic;
s->mb_skipped = 0;
assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
s->codec_id == CODEC_ID_SVQ3);
/* mark&release old frames */
if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
/* mark & release old frames */
if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
s->last_picture_ptr != s->next_picture_ptr &&
s->last_picture_ptr->f.data[0]) {
if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
if (s->last_picture_ptr->owner2 == s)
free_frame_buffer(s, s->last_picture_ptr);
/* release forgotten pictures */
/* if(mpeg124/h263) */
if(!s->encoding){
for(i=0; i<s->picture_count; i++){
if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
/* if (mpeg124/h263) */
if (!s->encoding) {
for (i = 0; i < s->picture_count; i++) {
if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
&s->picture[i] != s->next_picture_ptr &&
s->picture[i].f.reference) {
if (!(avctx->active_thread_type & FF_THREAD_FRAME))
av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
av_log(avctx, AV_LOG_ERROR,
"releasing zombie picture\n");
free_frame_buffer(s, &s->picture[i]);
}
}
@@ -1146,20 +1157,23 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
}
}
if(!s->encoding){
if (!s->encoding) {
ff_release_unused_pictures(s, 1);
if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
else{
i= ff_find_unused_picture(s, 0);
if (s->current_picture_ptr &&
s->current_picture_ptr->f.data[0] == NULL) {
// we already have an unused image
// (maybe it was set before reading the header)
pic = s->current_picture_ptr;
} else {
i = ff_find_unused_picture(s, 0);
if (i < 0)
return i;
pic= &s->picture[i];
pic = &s->picture[i];
}
pic->f.reference = 0;
if (!s->dropable){
if (!s->dropable) {
if (s->codec_id == CODEC_ID_H264)
pic->f.reference = s->picture_structure;
else if (s->pict_type != AV_PICTURE_TYPE_B)
@@ -1168,53 +1182,61 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
pic->f.coded_picture_number = s->coded_picture_number++;
if(ff_alloc_picture(s, pic, 0) < 0)
if (ff_alloc_picture(s, pic, 0) < 0)
return -1;
s->current_picture_ptr= pic;
//FIXME use only the vars from current_pic
s->current_picture_ptr = pic;
// FIXME use only the vars from current_pic
s->current_picture_ptr->f.top_field_first = s->top_field_first;
if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
if(s->picture_structure != PICT_FRAME)
s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
}
s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
s->codec_id == CODEC_ID_MPEG2VIDEO) {
if (s->picture_structure != PICT_FRAME)
s->current_picture_ptr->f.top_field_first =
(s->picture_structure == PICT_TOP_FIELD) == s->first_field;
}
s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
!s->progressive_sequence;
s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
}
s->current_picture_ptr->f.pict_type = s->pict_type;
// if(s->flags && CODEC_FLAG_QSCALE)
// s->current_picture_ptr->quality= s->new_picture_ptr->quality;
// if (s->flags && CODEC_FLAG_QSCALE)
// s->current_picture_ptr->quality = s->new_picture_ptr->quality;
s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
ff_copy_picture(&s->current_picture, s->current_picture_ptr);
if (s->pict_type != AV_PICTURE_TYPE_B) {
s->last_picture_ptr= s->next_picture_ptr;
if(!s->dropable)
s->next_picture_ptr= s->current_picture_ptr;
s->last_picture_ptr = s->next_picture_ptr;
if (!s->dropable)
s->next_picture_ptr = s->current_picture_ptr;
}
/* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
/* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
s->pict_type, s->dropable);*/
s->pict_type, s->dropable); */
if(s->codec_id != CODEC_ID_H264){
if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
(s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
if (s->codec_id != CODEC_ID_H264) {
if ((s->last_picture_ptr == NULL ||
s->last_picture_ptr->f.data[0] == NULL) &&
(s->pict_type != AV_PICTURE_TYPE_I ||
s->picture_structure != PICT_FRAME)) {
if (s->pict_type != AV_PICTURE_TYPE_I)
av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
av_log(avctx, AV_LOG_ERROR,
"warning: first frame is no keyframe\n");
else if (s->picture_structure != PICT_FRAME)
av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
av_log(avctx, AV_LOG_INFO,
"allocate dummy last picture for field based first keyframe\n");
/* Allocate a dummy frame */
i= ff_find_unused_picture(s, 0);
i = ff_find_unused_picture(s, 0);
if (i < 0)
return i;
s->last_picture_ptr= &s->picture[i];
s->last_picture_ptr->f.key_frame = 0;
if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
return -1;
if(s->codec_id == CODEC_ID_FLV1 || s->codec_id == CODEC_ID_H263){
@@ -1222,33 +1244,43 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
}
ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
INT_MAX, 0);
ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
INT_MAX, 1);
}
if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
if ((s->next_picture_ptr == NULL ||
s->next_picture_ptr->f.data[0] == NULL) &&
s->pict_type == AV_PICTURE_TYPE_B) {
/* Allocate a dummy frame */
i= ff_find_unused_picture(s, 0);
i = ff_find_unused_picture(s, 0);
if (i < 0)
return i;
s->next_picture_ptr= &s->picture[i];
s->next_picture_ptr->f.key_frame = 0;
if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
return -1;
ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
INT_MAX, 0);
ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
INT_MAX, 1);
}
}
if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
if (s->last_picture_ptr)
ff_copy_picture(&s->last_picture, s->last_picture_ptr);
if (s->next_picture_ptr)
ff_copy_picture(&s->next_picture, s->next_picture_ptr);
assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
s->last_picture_ptr->f.data[0]));
if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
if (s->picture_structure != PICT_FRAME && s->out_format != FMT_H264) {
int i;
for(i=0; i<4; i++){
if(s->picture_structure == PICT_BOTTOM_FIELD){
s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
for (i = 0; i < 4; i++) {
if (s->picture_structure == PICT_BOTTOM_FIELD) {
s->current_picture.f.data[i] +=
s->current_picture.f.linesize[i];
}
s->current_picture.f.linesize[i] *= 2;
s->last_picture.f.linesize[i] *= 2;
@@ -1258,81 +1290,86 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
s->err_recognition = avctx->err_recognition;
/* set dequantizer, we can't do it during init as it might change for mpeg4
and we can't do it in the header decode as init is not called for mpeg4 there yet */
if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
/* set dequantizer, we can't do it during init as
* it might change for mpeg4 and we can't do it in the header
* decode as init is not called for mpeg4 there yet */
if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
}else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
} else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
}else{
} else {
s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
}
if(s->dct_error_sum){
if (s->dct_error_sum) {
assert(s->avctx->noise_reduction && s->encoding);
update_noise_reduction(s);
}
if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
return ff_xvmc_field_start(s, avctx);
return 0;
}
/* generic function for encode/decode called after a frame has been coded/decoded */
/* generic function for encode/decode called after a
* frame has been coded/decoded. */
void MPV_frame_end(MpegEncContext *s)
{
int i;
/* redraw edges for the frame if decoding didn't complete */
//just to make sure that all data is rendered.
if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
// just to make sure that all data is rendered.
if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
ff_xvmc_field_end(s);
}else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
&& !s->avctx->hwaccel
&& !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
&& s->unrestricted_mv
&& s->current_picture.f.reference
&& !s->intra_only
&& !(s->flags&CODEC_FLAG_EMU_EDGE)) {
} else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
!s->avctx->hwaccel &&
!(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
s->unrestricted_mv &&
s->current_picture.f.reference &&
!s->intra_only &&
!(s->flags & CODEC_FLAG_EMU_EDGE)) {
int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
s->h_edge_pos , s->v_edge_pos,
EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
s->h_edge_pos, s->v_edge_pos,
EDGE_WIDTH, EDGE_WIDTH,
EDGE_TOP | EDGE_BOTTOM);
s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
EDGE_TOP | EDGE_BOTTOM);
s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
EDGE_TOP | EDGE_BOTTOM);
}
emms_c();
s->last_pict_type = s->pict_type;
s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
if(s->pict_type!=AV_PICTURE_TYPE_B){
s->last_non_b_pict_type= s->pict_type;
s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
if (s->pict_type != AV_PICTURE_TYPE_B) {
s->last_non_b_pict_type = s->pict_type;
}
#if 0
/* copy back current_picture variables */
for(i=0; i<MAX_PICTURE_COUNT; i++){
if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
s->picture[i]= s->current_picture;
for (i = 0; i < MAX_PICTURE_COUNT; i++) {
if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
s->picture[i] = s->current_picture;
break;
}
}
assert(i<MAX_PICTURE_COUNT);
assert(i < MAX_PICTURE_COUNT);
#endif
if(s->encoding){
if (s->encoding) {
/* release non-reference frames */
for(i=0; i<s->picture_count; i++){
if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
for (i = 0; i < s->picture_count; i++) {
if (s->picture[i].f.data[0] && !s->picture[i].f.reference
/* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
free_frame_buffer(s, &s->picture[i]);
}
}
@@ -1343,10 +1380,11 @@ void MPV_frame_end(MpegEncContext *s)
memset(&s->next_picture, 0, sizeof(Picture));
memset(&s->current_picture, 0, sizeof(Picture));
#endif
s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;
if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
ff_thread_report_progress((AVFrame *) s->current_picture_ptr,
s->mb_height - 1, 0);
}
}
@@ -1357,44 +1395,48 @@ void MPV_frame_end(MpegEncContext *s)
* @param stride stride/linesize of the image
* @param color color of the arrow
*/
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
int w, int h, int stride, int color)
{
int x, y, fr, f;
sx= av_clip(sx, 0, w-1);
sy= av_clip(sy, 0, h-1);
ex= av_clip(ex, 0, w-1);
ey= av_clip(ey, 0, h-1);
sx = av_clip(sx, 0, w - 1);
sy = av_clip(sy, 0, h - 1);
ex = av_clip(ex, 0, w - 1);
ey = av_clip(ey, 0, h - 1);
buf[sy*stride + sx]+= color;
buf[sy * stride + sx] += color;
if(FFABS(ex - sx) > FFABS(ey - sy)){
if(sx > ex){
if (FFABS(ex - sx) > FFABS(ey - sy)) {
if (sx > ex) {
FFSWAP(int, sx, ex);
FFSWAP(int, sy, ey);
}
buf+= sx + sy*stride;
ex-= sx;
f= ((ey-sy)<<16)/ex;
buf += sx + sy * stride;
ex -= sx;
f = ((ey - sy) << 16) / ex;
for(x= 0; x <= ex; x++){
y = (x*f)>>16;
fr= (x*f)&0xFFFF;
buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
buf[(y+1)*stride + x]+= (color* fr )>>16;
y = (x * f) >> 16;
fr = (x * f) & 0xFFFF;
buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
buf[(y + 1) * stride + x] += (color * fr ) >> 16;
}
}else{
if(sy > ey){
} else {
if (sy > ey) {
FFSWAP(int, sx, ex);
FFSWAP(int, sy, ey);
}
buf+= sx + sy*stride;
ey-= sy;
if(ey) f= ((ex-sx)<<16)/ey;
else f= 0;
buf += sx + sy * stride;
ey -= sy;
if (ey)
f = ((ex - sx) << 16) / ey;
else
f = 0;
for(y= 0; y <= ey; y++){
x = (y*f)>>16;
fr= (y*f)&0xFFFF;
buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
buf[y*stride + x+1]+= (color* fr )>>16;
x = (y*f) >> 16;
fr = (y*f) & 0xFFFF;
buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
buf[y * stride + x + 1] += (color * fr ) >> 16;
}
}
}
@@ -1406,25 +1448,27 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h
* @param stride stride/linesize of the image
* @param color color of the arrow
*/
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
int ey, int w, int h, int stride, int color)
{
int dx,dy;
sx= av_clip(sx, -100, w+100);
sy= av_clip(sy, -100, h+100);
ex= av_clip(ex, -100, w+100);
ey= av_clip(ey, -100, h+100);
sx = av_clip(sx, -100, w + 100);
sy = av_clip(sy, -100, h + 100);
ex = av_clip(ex, -100, w + 100);
ey = av_clip(ey, -100, h + 100);
dx= ex - sx;
dy= ey - sy;
dx = ex - sx;
dy = ey - sy;
if(dx*dx + dy*dy > 3*3){
int rx= dx + dy;
int ry= -dx + dy;
int length= ff_sqrt((rx*rx + ry*ry)<<8);
if (dx * dx + dy * dy > 3 * 3) {
int rx = dx + dy;
int ry = -dx + dy;
int length = ff_sqrt((rx * rx + ry * ry) << 8);
//FIXME subpixel accuracy
rx= ROUNDED_DIV(rx*3<<4, length);
ry= ROUNDED_DIV(ry*3<<4, length);
// FIXME subpixel accuracy
rx = ROUNDED_DIV(rx * 3 << 4, length);
ry = ROUNDED_DIV(ry * 3 << 4, length);
draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
@@ -1433,257 +1477,290 @@ static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int
}
/**
* Print debuging info for the given picture.
* Print debugging info for the given picture.
*/
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
{
if (s->avctx->hwaccel || !pict || !pict->mb_type)
return;
if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
int x,y;
av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
av_get_picture_type_char(pict->pict_type));
for(y=0; y<s->mb_height; y++){
for(x=0; x<s->mb_width; x++){
if(s->avctx->debug&FF_DEBUG_SKIP){
int count= s->mbskip_table[x + y*s->mb_stride];
if(count>9) count=9;
for (y = 0; y < s->mb_height; y++) {
for (x = 0; x < s->mb_width; x++) {
if (s->avctx->debug & FF_DEBUG_SKIP) {
int count = s->mbskip_table[x + y * s->mb_stride];
if (count > 9)
count = 9;
av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
}
if(s->avctx->debug&FF_DEBUG_QP){
av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
if (s->avctx->debug & FF_DEBUG_QP) {
av_log(s->avctx, AV_LOG_DEBUG, "%2d",
pict->qscale_table[x + y * s->mb_stride]);
}
if(s->avctx->debug&FF_DEBUG_MB_TYPE){
int mb_type= pict->mb_type[x + y*s->mb_stride];
//Type & MV direction
if(IS_PCM(mb_type))
if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
int mb_type = pict->mb_type[x + y * s->mb_stride];
// Type & MV direction
if (IS_PCM(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "P");
else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "A");
else if(IS_INTRA4x4(mb_type))
else if (IS_INTRA4x4(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "i");
else if(IS_INTRA16x16(mb_type))
else if (IS_INTRA16x16(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "I");
else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "d");
else if(IS_DIRECT(mb_type))
else if (IS_DIRECT(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "D");
else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "g");
else if(IS_GMC(mb_type))
else if (IS_GMC(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "G");
else if(IS_SKIP(mb_type))
else if (IS_SKIP(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "S");
else if(!USES_LIST(mb_type, 1))
else if (!USES_LIST(mb_type, 1))
av_log(s->avctx, AV_LOG_DEBUG, ">");
else if(!USES_LIST(mb_type, 0))
else if (!USES_LIST(mb_type, 0))
av_log(s->avctx, AV_LOG_DEBUG, "<");
else{
else {
assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
av_log(s->avctx, AV_LOG_DEBUG, "X");
}
//segmentation
if(IS_8X8(mb_type))
// segmentation
if (IS_8X8(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "+");
else if(IS_16X8(mb_type))
else if (IS_16X8(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "-");
else if(IS_8X16(mb_type))
else if (IS_8X16(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "|");
else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, " ");
else
av_log(s->avctx, AV_LOG_DEBUG, "?");
if(IS_INTERLACED(mb_type))
if (IS_INTERLACED(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "=");
else
av_log(s->avctx, AV_LOG_DEBUG, " ");
}
// av_log(s->avctx, AV_LOG_DEBUG, " ");
// av_log(s->avctx, AV_LOG_DEBUG, " ");
}
av_log(s->avctx, AV_LOG_DEBUG, "\n");
}
}
if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
s->avctx->debug_mv) {
const int shift= 1 + s->quarter_sample;
(s->avctx->debug_mv)) {
const int shift = 1 + s->quarter_sample;
int mb_y;
uint8_t *ptr;
int i;
int h_chroma_shift, v_chroma_shift, block_height;
const int width = s->avctx->width;
const int height= s->avctx->height;
const int mv_sample_log2= 4 - pict->motion_subsample_log2;
const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
s->low_delay=0; //needed to see the vectors without trashing the buffers
avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
for(i=0; i<3; i++){
memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
pict->data[i]= s->visualization_buffer[i];
const int height = s->avctx->height;
const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
const int mv_stride = (s->mb_width << mv_sample_log2) +
(s->codec_id == CODEC_ID_H264 ? 0 : 1);
s->low_delay = 0; // needed to see the vectors without trashing the buffers
avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
&h_chroma_shift, &v_chroma_shift);
for (i = 0; i < 3; i++) {
memcpy(s->visualization_buffer[i], pict->data[i],
(i == 0) ? pict->linesize[i] * height:
pict->linesize[i] * height >> v_chroma_shift);
pict->data[i] = s->visualization_buffer[i];
}
pict->type = FF_BUFFER_TYPE_COPY;
pict->opaque = NULL;
ptr = pict->data[0];
block_height = 16 >> v_chroma_shift;
for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
int mb_x;
for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
const int mb_index = mb_x + mb_y * s->mb_stride;
if ((s->avctx->debug_mv) && pict->motion_val) {
int type;
for (type = 0; type < 3; type++) {
int direction = 0;
switch (type) {
case 0:
if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
(pict->pict_type != AV_PICTURE_TYPE_P))
continue;
direction = 0;
break;
case 1:
if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
(pict->pict_type != AV_PICTURE_TYPE_B))
continue;
direction = 0;
break;
case 2:
if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
(pict->pict_type != AV_PICTURE_TYPE_B))
continue;
direction = 1;
break;
}
if (!USES_LIST(pict->mb_type[mb_index], direction))
continue;
if (IS_8X8(pict->mb_type[mb_index])) {
int i;
for (i = 0; i < 4; i++) {
int sx = mb_x * 16 + 4 + 8 * (i & 1);
int sy = mb_y * 16 + 4 + 8 * (i >> 1);
int xy = (mb_x * 2 + (i & 1) +
(mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
draw_arrow(ptr, sx, sy, mx, my, width,
height, s->linesize, 100);
}
} else if (IS_16X8(pict->mb_type[mb_index])) {
int i;
for (i = 0; i < 2; i++) {
int sx = mb_x * 16 + 8;
int sy = mb_y * 16 + 4 + 8 * i;
int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
int mx = (pict->motion_val[direction][xy][0] >> shift);
int my = (pict->motion_val[direction][xy][1] >> shift);
if (IS_INTERLACED(pict->mb_type[mb_index]))
my *= 2;
draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
height, s->linesize, 100);
}
} else if (IS_8X16(pict->mb_type[mb_index])) {
int i;
for (i = 0; i < 2; i++) {
int sx = mb_x * 16 + 4 + 8 * i;
int sy = mb_y * 16 + 8;
int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
int mx = pict->motion_val[direction][xy][0] >> shift;
int my = pict->motion_val[direction][xy][1] >> shift;
if (IS_INTERLACED(pict->mb_type[mb_index]))
my *= 2;
draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
height, s->linesize, 100);
}
} else {
int sx = mb_x * 16 + 8;
int sy = mb_y * 16 + 8;
int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
}
}
}
if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
0x0101010101010101ULL;
int y;
for (y = 0; y < block_height; y++) {
*(uint64_t *)(pict->data[1] + 8 * mb_x +
(block_height * mb_y + y) *
pict->linesize[1]) = c;
*(uint64_t *)(pict->data[2] + 8 * mb_x +
(block_height * mb_y + y) *
pict->linesize[2]) = c;
}
}
if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
pict->motion_val) {
int mb_type = pict->mb_type[mb_index];
uint64_t u, v;
int y;
#define COLOR(theta, r) \
u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
v = (int)(128 + r * sin(theta * 3.141592 / 180));
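/* COLOR() picks a chroma color on the U/V plane: theta is the hue
 * angle in degrees, r the distance from neutral grey (u = v = 128). */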
u = v = 128;
if (IS_PCM(mb_type)) {
COLOR(120, 48)
} else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
IS_INTRA16x16(mb_type)) {
COLOR(30, 48)
} else if (IS_INTRA4x4(mb_type)) {
COLOR(90, 48)
} else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
// COLOR(120, 48)
} else if (IS_DIRECT(mb_type)) {
COLOR(150, 48)
} else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
COLOR(170, 48)
} else if (IS_GMC(mb_type)) {
COLOR(190, 48)
} else if (IS_SKIP(mb_type)) {
// COLOR(180, 48)
} else if (!USES_LIST(mb_type, 1)) {
COLOR(240, 48)
} else if (!USES_LIST(mb_type, 0)) {
COLOR(0, 48)
} else {
assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
COLOR(300, 48)
}
u *= 0x0101010101010101ULL;
v *= 0x0101010101010101ULL;
for (y = 0; y < block_height; y++) {
*(uint64_t *)(pict->data[1] + 8 * mb_x +
(block_height * mb_y + y) * pict->linesize[1]) = u;
*(uint64_t *)(pict->data[2] + 8 * mb_x +
(block_height * mb_y + y) * pict->linesize[2]) = v;
}
// segmentation
if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
*(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
(16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
*(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
(16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
}
if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
for (y = 0; y < 16; y++)
pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
pict->linesize[0]] ^= 0x80;
}
if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
int dm = 1 << (mv_sample_log2 - 2);
for (i = 0; i < 4; i++) {
int sx = mb_x * 16 + 8 * (i & 1);
int sy = mb_y * 16 + 8 * (i >> 1);
int xy = (mb_x * 2 + (i & 1) +
(mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
// FIXME bidir
int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
if (mv[0] != mv[dm] ||
mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
for (y = 0; y < 8; y++)
pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
*(uint64_t *)(pict->data[0] + sx + (sy + 4) *
pict->linesize[0]) ^= 0x8080808080808080ULL;
}
}
if (IS_INTERLACED(mb_type) &&
s->codec_id == CODEC_ID_H264) {
// hmm
}
}
s->mbskip_table[mb_index] = 0;
}
}
}
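The overlay code above only runs when the caller opts in through the debug fields on the codec context. A minimal sketch of enabling it from an application, assuming avctx was allocated and opened elsewhere; the flag names are the ones tested in the code above:

    /* Hypothetical setup: draw QP and macroblock-type overlays plus
     * all motion vectors into the decoded pictures. */
    avctx->debug   |= FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE;
    avctx->debug_mv = FF_DEBUG_VIS_MV_P_FOR   /* forward MVs of P-frames  */
                    | FF_DEBUG_VIS_MV_B_FOR   /* forward MVs of B-frames  */
                    | FF_DEBUG_VIS_MV_B_BACK; /* backward MVs of B-frames */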
......@@ -1698,35 +1775,38 @@ static inline int hpel_motion_lowres(MpegEncContext *s,
int w, int h, h264_chroma_mc_func *pix_op,
int motion_x, int motion_y)
{
const int lowres = s->avctx->lowres;
const int op_index = FFMIN(lowres, 2);
const int s_mask = (2 << lowres) - 1;
int emu = 0;
int sx, sy;
if (s->quarter_sample) {
motion_x /= 2;
motion_y /= 2;
}
sx = motion_x & s_mask;
sy = motion_y & s_mask;
src_x += motion_x >> lowres + 1;
src_y += motion_y >> lowres + 1;
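/* note: '+' binds tighter than '>>' in C, so these still compute
 * motion_x >> (lowres + 1) and motion_y >> (lowres + 1); dropping
 * the parentheses is cosmetic, not a behavior change */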
src += src_y * stride + src_x;
if ((unsigned)src_x > h_edge_pos - (!!sx) - w ||
(unsigned)src_y > (v_edge_pos >> field_based) - (!!sy) - h) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
(h + 1) << field_based, src_x,
src_y << field_based,
h_edge_pos,
v_edge_pos);
src = s->edge_emu_buffer;
emu = 1;
}
sx = (sx << 2) >> lowres;
sy = (sy << 2) >> lowres;
if (field_select)
src += s->linesize;
pix_op[op_index](dest, src, stride, h, sx, sy);
return emu;
......@@ -1734,56 +1814,65 @@ static inline int hpel_motion_lowres(MpegEncContext *s,
/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
uint8_t *dest_y,
uint8_t *dest_cb,
uint8_t *dest_cr,
int field_based,
int bottom_field,
int field_select,
uint8_t **ref_picture,
h264_chroma_mc_func *pix_op,
int motion_x, int motion_y,
int h, int mb_y)
{
uint8_t *ptr_y, *ptr_cb, *ptr_cr;
int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
uvsx, uvsy;
const int lowres = s->avctx->lowres;
const int op_index = FFMIN(lowres - 1 + s->chroma_x_shift, 2);
const int block_s = 8 >> lowres;
const int s_mask = (2 << lowres) - 1;
const int h_edge_pos = s->h_edge_pos >> lowres;
const int v_edge_pos = s->v_edge_pos >> lowres;
linesize = s->current_picture.f.linesize[0] << field_based;
uvlinesize = s->current_picture.f.linesize[1] << field_based;
// FIXME obviously not perfect but qpel will not work in lowres anyway
if (s->quarter_sample) {
motion_x /= 2;
motion_y /= 2;
}
if (field_based) {
motion_y += (bottom_field - field_select) * ((1 << lowres) - 1);
}
sx = motion_x & s_mask;
sy = motion_y & s_mask;
src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
if (s->out_format == FMT_H263) {
uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
uvsrc_x = src_x >> 1;
uvsrc_y = src_y >> 1;
} else if (s->out_format == FMT_H261) {
// even chroma mv's are full pel in H261
mx = motion_x / 4;
my = motion_y / 4;
uvsx = (2 * mx) & s_mask;
uvsy = (2 * my) & s_mask;
uvsrc_x = s->mb_x * block_s + (mx >> lowres);
uvsrc_y = mb_y * block_s + (my >> lowres);
} else {
if (s->chroma_y_shift) {
mx = motion_x / 2;
my = motion_y / 2;
uvsx = mx & s_mask;
uvsy = my & s_mask;
uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
} else {
if (s->chroma_x_shift) {
// Chroma422
......@@ -1806,96 +1895,106 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
if ((unsigned) src_x > h_edge_pos - (!!sx) - 2 * block_s ||
(unsigned) src_y > (v_edge_pos >> field_based) - (!!sy) - h) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
s->linesize, 17, 17 + field_based,
src_x, src_y << field_based, h_edge_pos,
v_edge_pos);
ptr_y = s->edge_emu_buffer;
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
9 + field_based,
uvsrc_x, uvsrc_y << field_based,
h_edge_pos >> 1, v_edge_pos >> 1);
s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
9 + field_based,
uvsrc_x, uvsrc_y << field_based,
h_edge_pos >> 1, v_edge_pos >> 1);
ptr_cb = uvbuf;
ptr_cr = uvbuf + 16;
}
}
// FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
if (bottom_field) {
dest_y += s->linesize;
dest_cb += s->uvlinesize;
dest_cr += s->uvlinesize;
}
if (field_select) {
ptr_y += s->linesize;
ptr_cb += s->uvlinesize;
ptr_cr += s->uvlinesize;
}
sx = (sx << 2) >> lowres;
sy = (sy << 2) >> lowres;
pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
uvsx = (uvsx << 2) >> lowres;
uvsy = (uvsy << 2) >> lowres;
if (h >> s->chroma_y_shift) {
pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
}
}
// FIXME h261 lowres loop filter
}
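Everything in this function scales with the decoder's lowres setting: block_s is 8 >> lowres, and edge positions and motion vectors are shifted to match. A hedged caller-side sketch, assuming a codec that supports lowres decoding:

    /* Hypothetical setup: decode at half resolution. block_s then
     * becomes 8 >> 1 = 4, so a 16x16 macroblock is reconstructed
     * as an 8x8 block in the downscaled picture. */
    avctx->lowres = 1;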
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
uint8_t *dest_cb, uint8_t *dest_cr,
uint8_t **ref_picture,
h264_chroma_mc_func *pix_op,
int mx, int my)
{
const int lowres = s->avctx->lowres;
const int op_index = FFMIN(lowres, 2);
const int block_s = 8 >> lowres;
const int s_mask = (2 << lowres) - 1;
const int h_edge_pos = s->h_edge_pos >> lowres + 1;
const int v_edge_pos = s->v_edge_pos >> lowres + 1;
int emu = 0, src_x, src_y, offset, sx, sy;
uint8_t *ptr;
if (s->quarter_sample) {
mx /= 2;
my /= 2;
}
/* In case of 8X8, we construct a single chroma motion vector
with a special rounding */
mx = ff_h263_round_chroma(mx);
my = ff_h263_round_chroma(my);
sx = mx & s_mask;
sy = my & s_mask;
src_x = s->mb_x * block_s + (mx >> lowres + 1);
src_y = s->mb_y * block_s + (my >> lowres + 1);
offset = src_y * s->uvlinesize + src_x;
ptr = ref_picture[1] + offset;
if (s->flags & CODEC_FLAG_EMU_EDGE) {
if ((unsigned) src_x > h_edge_pos - (!!sx) - block_s ||
(unsigned) src_y > v_edge_pos - (!!sy) - block_s) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
ptr = s->edge_emu_buffer;
emu = 1;
}
}
sx = (sx << 2) >> lowres;
sy = (sy << 2) >> lowres;
pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
ptr = ref_picture[2] + offset;
if (emu) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
src_x, src_y, h_edge_pos, v_edge_pos);
ptr = s->edge_emu_buffer;
}
pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
......@@ -1912,32 +2011,36 @@ static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
* the motion vectors are taken from s->mv and the MV type from s->mv_type
*/
static inline void MPV_motion_lowres(MpegEncContext *s,
uint8_t *dest_y, uint8_t *dest_cb,
uint8_t *dest_cr,
int dir, uint8_t **ref_picture,
h264_chroma_mc_func *pix_op)
{
int mx, my;
int mb_x, mb_y, i;
const int lowres = s->avctx->lowres;
const int block_s = 8 >> lowres;
mb_x = s->mb_x;
mb_y = s->mb_y;
switch (s->mv_type) {
case MV_TYPE_16X16:
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
0, 0, 0,
ref_picture, pix_op,
s->mv[dir][0][0], s->mv[dir][0][1],
2 * block_s, mb_y);
break;
case MV_TYPE_8X8:
mx = 0;
my = 0;
for (i = 0; i < 4; i++) {
hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
s->linesize) * block_s,
ref_picture[0], 0, 0,
(2 * mb_x + (i & 1)) * block_s,
(2 * mb_y + (i >> 1)) * block_s,
s->width, s->height, s->linesize,
s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
block_s, block_s, pix_op,
......@@ -1947,8 +2050,9 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
my += s->mv[dir][i][1];
}
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
pix_op, mx, my);
break;
case MV_TYPE_FIELD:
if (s->picture_structure == PICT_FRAME) {
......@@ -1956,73 +2060,84 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1, 0, s->field_select[dir][0],
ref_picture, pix_op,
s->mv[dir][0][0], s->mv[dir][0][1],
block_s, mb_y);
/* bottom field */
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1, 1, s->field_select[dir][1],
ref_picture, pix_op,
s->mv[dir][1][0], s->mv[dir][1][1],
block_s, mb_y);
} else {
if (s->picture_structure != s->field_select[dir][0] + 1 &&
s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
ref_picture = s->current_picture_ptr->f.data;
}
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
0, 0, s->field_select[dir][0],
ref_picture, pix_op,
s->mv[dir][0][0],
s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
}
break;
case MV_TYPE_16X8:
for (i = 0; i < 2; i++) {
uint8_t **ref2picture;
if (s->picture_structure == s->field_select[dir][i] + 1 ||
s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
ref2picture = ref_picture;
} else {
ref2picture = s->current_picture_ptr->f.data;
}
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
0, 0, s->field_select[dir][i],
ref2picture, pix_op,
s->mv[dir][i][0], s->mv[dir][i][1] +
2 * block_s * i, block_s, mb_y >> 1);
dest_y += 2 * block_s * s->linesize;
dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
}
break;
case MV_TYPE_DMV:
if (s->picture_structure == PICT_FRAME) {
for (i = 0; i < 2; i++) {
int j;
for (j = 0; j < 2; j++) {
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1, j, j ^ i,
ref_picture, pix_op,
s->mv[dir][2 * i + j][0],
s->mv[dir][2 * i + j][1],
block_s, mb_y);
}
pix_op = s->dsp.avg_h264_chroma_pixels_tab;
}
} else {
for (i = 0; i < 2; i++) {
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
0, 0, s->picture_structure != i + 1,
ref_picture, pix_op,
s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2 * block_s, mb_y >> 1);
// after put we make avg of the same block
pix_op = s->dsp.avg_h264_chroma_pixels_tab;
// opposite parity is always in the same
// frame if this is second field
if (!s->first_field) {
ref_picture = s->current_picture_ptr->f.data;
}
}
}
break;
default:
assert(0);
}
}
......
......@@ -604,6 +604,10 @@ int ff_thread_decode_frame(AVCodecContext *avctx,
*picture = p->frame;
*got_picture_ptr = p->got_frame;
picture->pkt_dts = p->avpkt.dts;
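/* mirror the new AVFrame metadata fields on the returned picture so
 * the threaded path reports the same width/height/format/SAR as
 * direct decoding */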
picture->sample_aspect_ratio = avctx->sample_aspect_ratio;
picture->width = avctx->width;
picture->height = avctx->height;
picture->format = avctx->pix_fmt;
/*
* A later call with avpkt->size == 0 may loop over all threads,
......
......@@ -1093,6 +1093,8 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
if (ret >= 0 && *got_frame_ptr) {
avctx->frame_number++;
frame->pkt_dts = avpkt->dts;
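/* new AVFrame.format field: if the decoder left it unset, inherit
 * the sample format from the codec context */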
if (frame->format == AV_SAMPLE_FMT_NONE)
frame->format = avctx->sample_fmt;
}
}
return ret;
......
......@@ -21,7 +21,7 @@
#define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 53
#define LIBAVCODEC_VERSION_MINOR 47
#define LIBAVCODEC_VERSION_MINOR 48
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
......
......@@ -23,7 +23,7 @@
#include "avcodec.h"
void avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
{
dst->pts = src->pts;
dst->pos = src->pkt_pos;
......@@ -39,6 +39,8 @@ void avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
dst->video->key_frame = src->key_frame;
dst->video->pict_type = src->pict_type;
}
return 0;
}
AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame,
......
......@@ -36,7 +36,7 @@
* Copy the frame properties of src to dst, without copying the actual
* image data.
*/
void avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
/**
* Create and return a picref reference from the data and properties
......
......@@ -27,9 +27,10 @@
#include "libavutil/samplefmt.h"
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"
#include "libavcodec/avcodec.h"
#define LIBAVFILTER_VERSION_MAJOR 2
#define LIBAVFILTER_VERSION_MINOR 53
#define LIBAVFILTER_VERSION_MINOR 54
#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
......@@ -953,4 +954,12 @@ static inline void avfilter_insert_outpad(AVFilterContext *f, unsigned index,
&f->output_pads, &f->outputs, p);
}
/**
* Copy the frame properties of src to dst, without copying the actual
* image data.
*
* @return 0 on success, a negative number on error.
*/
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
#endif /* AVFILTER_AVFILTER_H */
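Since avfilter_copy_frame_props() now reports failure, callers should check its result. A minimal usage sketch, not part of this commit, assuming frame and picref were obtained elsewhere (for example from avcodec_decode_video2() and avfilter_get_video_buffer()):

    #include "libavfilter/avfilter.h"

    /* Hedged sketch: propagate decoded-frame properties into a
     * filter buffer, honoring the new int return value. */
    static int push_frame_props(AVFilterBufferRef *picref, const AVFrame *frame)
    {
        int ret = avfilter_copy_frame_props(picref, frame);
        if (ret < 0)
            return ret; /* propagate the error instead of ignoring it */
        return 0;
    }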
/*
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_BUFFERSRC_H
#define AVFILTER_BUFFERSRC_H
/**
* @file
* Memory buffer source API.
*/
#include "avfilter.h"
/**
* Add a buffer to the filtergraph s.
*
* @param buf buffer containing frame data to be passed down the filtergraph.
* This function will take ownership of buf, the user must not free it.
*/
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf);
#endif /* AVFILTER_BUFFERSRC_H */
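A minimal usage sketch for the new entry point, not part of this commit, assuming buffersrc_ctx is a buffer-source filter instance created elsewhere (for example with avfilter_graph_create_filter()) and picref holds the frame to inject:

    #include "libavfilter/buffersrc.h"

    /* Hedged sketch: feed one frame into the buffer source. On
     * success the filter owns picref and the caller must not free it. */
    static int feed_one_frame(AVFilterContext *buffersrc_ctx,
                              AVFilterBufferRef *picref)
    {
        int ret = av_buffersrc_buffer(buffersrc_ctx, picref);
        if (ret < 0)
            return ret; /* e.g. AVERROR(EINVAL) while a frame is still pending */
        return 0;
    }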
......@@ -264,6 +264,7 @@ static int movie_get_frame(AVFilterLink *outlink)
/* use pkt_dts if pkt_pts is not available */
movie->picref->pts = movie->frame->pkt_pts == AV_NOPTS_VALUE ?
movie->frame->pkt_dts : movie->frame->pkt_pts;
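/* prefer the per-frame SAR (new AVFrame field); fall back to the
 * stream's aspect ratio when the decoder left it unset */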
if (!movie->frame->sample_aspect_ratio.num)
movie->picref->video->sample_aspect_ratio = st->sample_aspect_ratio;
av_dlog(outlink->src,
......
......@@ -26,6 +26,7 @@
#include "avfilter.h"
#include "internal.h"
#include "avcodec.h"
#include "buffersrc.h"
#include "vsrc_buffer.h"
#include "libavutil/imgutils.h"
......@@ -112,6 +113,23 @@ int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter,
return 0;
}
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
{
BufferSourceContext *c = s->priv;
if (c->picref) {
av_log(s, AV_LOG_ERROR,
"Buffering several frames is not supported. "
"Please consume all available frames before adding a new one.\n"
);
return AVERROR(EINVAL);
}
c->picref = buf;
return 0;
}
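Design note: the buffer source deliberately holds at most one pending frame; a second av_buffersrc_buffer() call before the graph has consumed the first fails with AVERROR(EINVAL), as the error message above says, keeping the source in lock-step with the graph instead of queueing frames without bound.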
#if CONFIG_AVCODEC
#include "avcodec.h"
......