Linshizhi / ffmpeg.wasm-core / Commits

Commit 3fe2a01d, authored Oct 26, 2016 by Anton Khirnov

lavc: move decoding-related code from utils.c to a new file

Parent: 328cd2b5

Showing 3 changed files with 925 additions and 889 deletions (+925 / -889)
libavcodec/Makefile    +1    -0
libavcodec/decode.c    +924  -0
libavcodec/utils.c     +0    -889

libavcodec/Makefile  @ 3fe2a01d

@@ -23,6 +23,7 @@ OBJS = allcodecs.o \
        bsf.o \
        codec_desc.o \
        d3d11va.o \
+       decode.o \
        dirac.o \
        dv_profile.o \
        encode.o \

libavcodec/decode.c  (new file, 0 → 100644)  @ 3fe2a01d

/*
 * generic decoding-related code
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <string.h>

#include "config.h"

#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/frame.h"
#include "libavutil/hwcontext.h"
#include "libavutil/imgutils.h"

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"
#include "thread.h"

static int apply_param_change(AVCodecContext *avctx, AVPacket *avpkt)
{
    int size = 0, ret;
    const uint8_t *data;
    uint32_t flags;

    data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
    if (!data)
        return 0;

    if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
        av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
               "changes, but PARAM_CHANGE side data was sent to it.\n");
        ret = AVERROR(EINVAL);
        goto fail2;
    }

    if (size < 4)
        goto fail;

    flags = bytestream_get_le32(&data);
    size -= 4;

    if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
        if (size < 4)
            goto fail;
        avctx->channels = bytestream_get_le32(&data);
        size -= 4;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
        if (size < 8)
            goto fail;
        avctx->channel_layout = bytestream_get_le64(&data);
        size -= 8;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
        if (size < 4)
            goto fail;
        avctx->sample_rate = bytestream_get_le32(&data);
        size -= 4;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
        if (size < 8)
            goto fail;
        avctx->width  = bytestream_get_le32(&data);
        avctx->height = bytestream_get_le32(&data);
        size -= 8;
        ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
        if (ret < 0)
            goto fail2;
    }

    return 0;
fail:
    av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
    ret = AVERROR_INVALIDDATA;
fail2:
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
        if (avctx->err_recognition & AV_EF_EXPLODE)
            return ret;
    }
    return 0;
}

static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
{
    int ret;

    /* move the original frame to our backup */
    av_frame_unref(avci->to_free);
    av_frame_move_ref(avci->to_free, frame);

    /* now copy everything except the AVBufferRefs back
     * note that we make a COPY of the side data, so calling av_frame_free() on
     * the caller's frame will work properly */
    ret = av_frame_copy_props(frame, avci->to_free);
    if (ret < 0)
        return ret;

    memcpy(frame->data,     avci->to_free->data,     sizeof(frame->data));
    memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
    if (avci->to_free->extended_data != avci->to_free->data) {
        int planes = av_get_channel_layout_nb_channels(avci->to_free->channel_layout);
        int size   = planes * sizeof(*frame->extended_data);

        if (!size) {
            av_frame_unref(frame);
            return AVERROR_BUG;
        }

        frame->extended_data = av_malloc(size);
        if (!frame->extended_data) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        memcpy(frame->extended_data, avci->to_free->extended_data, size);
    } else
        frame->extended_data = frame->data;

    frame->format         = avci->to_free->format;
    frame->width          = avci->to_free->width;
    frame->height         = avci->to_free->height;
    frame->channel_layout = avci->to_free->channel_layout;
    frame->nb_samples     = avci->to_free->nb_samples;

    return 0;
}

static int do_decode(AVCodecContext *avctx, AVPacket *pkt)
{
    int got_frame;
    int ret;

    av_assert0(!avctx->internal->buffer_frame->buf[0]);

    if (!pkt)
        pkt = avctx->internal->buffer_pkt;

    // This is the lesser evil. The field is for compatibility with legacy users
    // of the legacy API, and users using the new API should not be forced to
    // even know about this field.
    avctx->refcounted_frames = 1;

    // Some codecs (at least wma lossless) will crash when feeding drain packets
    // after EOF was signaled.
    if (avctx->internal->draining_done)
        return AVERROR_EOF;

    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        ret = avcodec_decode_video2(avctx, avctx->internal->buffer_frame,
                                    &got_frame, pkt);
        if (ret >= 0)
            ret = pkt->size;
    } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        ret = avcodec_decode_audio4(avctx, avctx->internal->buffer_frame,
                                    &got_frame, pkt);
    } else {
        ret = AVERROR(EINVAL);
    }

    if (ret < 0)
        return ret;

    if (avctx->internal->draining && !got_frame)
        avctx->internal->draining_done = 1;

    if (ret >= pkt->size) {
        av_packet_unref(avctx->internal->buffer_pkt);
    } else {
        int consumed = ret;

        if (pkt != avctx->internal->buffer_pkt) {
            av_packet_unref(avctx->internal->buffer_pkt);
            if ((ret = av_packet_ref(avctx->internal->buffer_pkt, pkt)) < 0)
                return ret;
        }

        avctx->internal->buffer_pkt->data += consumed;
        avctx->internal->buffer_pkt->size -= consumed;
        avctx->internal->buffer_pkt->pts   = AV_NOPTS_VALUE;
        avctx->internal->buffer_pkt->dts   = AV_NOPTS_VALUE;
    }

    if (got_frame)
        av_assert0(avctx->internal->buffer_frame->buf[0]);

    return 0;
}

int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
    int ret;

    if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
        return AVERROR(EINVAL);

    if (avctx->internal->draining)
        return AVERROR_EOF;

    if (!avpkt || !avpkt->size) {
        avctx->internal->draining = 1;
        avpkt = NULL;

        if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
            return 0;
    }

    if (avctx->codec->send_packet) {
        if (avpkt) {
            ret = apply_param_change(avctx, (AVPacket *)avpkt);
            if (ret < 0)
                return ret;
        }
        return avctx->codec->send_packet(avctx, avpkt);
    }

    // Emulation via old API. Assume avpkt is likely not refcounted, while
    // decoder output is always refcounted, and avoid copying.

    if (avctx->internal->buffer_pkt->size || avctx->internal->buffer_frame->buf[0])
        return AVERROR(EAGAIN);

    // The goal is decoding the first frame of the packet without using memcpy,
    // because the common case is having only 1 frame per packet (especially
    // with video, but audio too). In other cases, it can't be avoided, unless
    // the user is feeding refcounted packets.
    return do_decode(avctx, (AVPacket *)avpkt);
}

int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
    int ret;

    av_frame_unref(frame);

    if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
        return AVERROR(EINVAL);

    if (avctx->codec->receive_frame) {
        if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
            return AVERROR_EOF;
        return avctx->codec->receive_frame(avctx, frame);
    }

    // Emulation via old API.

    if (!avctx->internal->buffer_frame->buf[0]) {
        if (!avctx->internal->buffer_pkt->size && !avctx->internal->draining)
            return AVERROR(EAGAIN);

        while (1) {
            if ((ret = do_decode(avctx, avctx->internal->buffer_pkt)) < 0) {
                av_packet_unref(avctx->internal->buffer_pkt);
                return ret;
            }
            // Some audio decoders may consume partial data without returning
            // a frame (fate-wmapro-2ch). There is no way to make the caller
            // call avcodec_receive_frame() again without returning a frame,
            // so try to decode more in these cases.
            if (avctx->internal->buffer_frame->buf[0] ||
                !avctx->internal->buffer_pkt->size)
                break;
        }
    }

    if (!avctx->internal->buffer_frame->buf[0])
        return avctx->internal->draining ? AVERROR_EOF : AVERROR(EAGAIN);

    av_frame_move_ref(frame, avctx->internal->buffer_frame);
    return 0;
}

int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
                                              int *got_picture_ptr,
                                              AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    *got_picture_ptr = 0;
    if ((avctx->coded_width || avctx->coded_height) &&
        av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx))
        return -1;

    if (!avctx->codec->decode) {
        av_log(avctx, AV_LOG_ERROR, "This decoder requires using the avcodec_send_packet() API.\n");
        return AVERROR(ENOSYS);
    }

    avctx->internal->pkt = avpkt;
    ret = apply_param_change(avctx, avpkt);
    if (ret < 0)
        return ret;

    av_frame_unref(picture);

    if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size ||
        (avctx->active_thread_type & FF_THREAD_FRAME)) {
        if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
            ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr,
                                         avpkt);
        else {
            ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
                                       avpkt);
            if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
                picture->pkt_dts = avpkt->dts;
            /* get_buffer is supposed to set frame parameters */
            if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
                picture->sample_aspect_ratio = avctx->sample_aspect_ratio;
                picture->width               = avctx->width;
                picture->height              = avctx->height;
                picture->format              = avctx->pix_fmt;
            }
        }

        emms_c(); //needed to avoid an emms_c() call before every return;

        if (*got_picture_ptr) {
            if (!avctx->refcounted_frames) {
                int err = unrefcount_frame(avci, picture);
                if (err < 0)
                    return err;
            }

            avctx->frame_number++;
        } else
            av_frame_unref(picture);
    } else
        ret = 0;

#if FF_API_AVCTX_TIMEBASE
    if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
        avctx->time_base = av_inv_q(avctx->framerate);
#endif

    return ret;
}

int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
                                              AVFrame *frame,
                                              int *got_frame_ptr,
                                              AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret = 0;

    *got_frame_ptr = 0;

    if (!avctx->codec->decode) {
        av_log(avctx, AV_LOG_ERROR, "This decoder requires using the avcodec_send_packet() API.\n");
        return AVERROR(ENOSYS);
    }

    avctx->internal->pkt = avpkt;

    if (!avpkt->data && avpkt->size) {
        av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
        return AVERROR(EINVAL);
    }

    ret = apply_param_change(avctx, avpkt);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);

    if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
        ret = avctx->codec->decode(avctx, frame, got_frame_ptr, avpkt);
        if (ret >= 0 && *got_frame_ptr) {
            avctx->frame_number++;
            frame->pkt_dts = avpkt->dts;
            if (frame->format == AV_SAMPLE_FMT_NONE)
                frame->format = avctx->sample_fmt;

            if (!avctx->refcounted_frames) {
                int err = unrefcount_frame(avci, frame);
                if (err < 0)
                    return err;
            }
        } else
            av_frame_unref(frame);
    }

    return ret;
}

int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
                             int *got_sub_ptr,
                             AVPacket *avpkt)
{
    int ret;

    avctx->internal->pkt = avpkt;
    *got_sub_ptr = 0;
    ret = avctx->codec->decode(avctx, sub, got_sub_ptr, avpkt);
    if (*got_sub_ptr)
        avctx->frame_number++;
    return ret;
}

static int is_hwaccel_pix_fmt(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    return desc->flags & AV_PIX_FMT_FLAG_HWACCEL;
}

enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
{
    while (*fmt != AV_PIX_FMT_NONE && is_hwaccel_pix_fmt(*fmt))
        ++fmt;
    return fmt[0];
}

static AVHWAccel *find_hwaccel(enum AVCodecID codec_id,
                               enum AVPixelFormat pix_fmt)
{
    AVHWAccel *hwaccel = NULL;

    while ((hwaccel = av_hwaccel_next(hwaccel)))
        if (hwaccel->id == codec_id &&
            hwaccel->pix_fmt == pix_fmt)
            return hwaccel;
    return NULL;
}

static int setup_hwaccel(AVCodecContext *avctx,
                         const enum AVPixelFormat fmt,
                         const char *name)
{
    AVHWAccel *hwa = find_hwaccel(avctx->codec_id, fmt);
    int ret        = 0;

    if (!hwa) {
        av_log(avctx, AV_LOG_ERROR,
               "Could not find an AVHWAccel for the pixel format: %s",
               name);
        return AVERROR(ENOENT);
    }

    if (hwa->priv_data_size) {
        avctx->internal->hwaccel_priv_data = av_mallocz(hwa->priv_data_size);
        if (!avctx->internal->hwaccel_priv_data)
            return AVERROR(ENOMEM);
    }

    if (hwa->init) {
        ret = hwa->init(avctx);
        if (ret < 0) {
            av_freep(&avctx->internal->hwaccel_priv_data);
            return ret;
        }
    }

    avctx->hwaccel = hwa;

    return 0;
}

int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
{
    const AVPixFmtDescriptor *desc;
    enum AVPixelFormat *choices;
    enum AVPixelFormat ret;
    unsigned n = 0;

    while (fmt[n] != AV_PIX_FMT_NONE)
        ++n;

    av_assert0(n >= 1);
    avctx->sw_pix_fmt = fmt[n - 1];
    av_assert2(!is_hwaccel_pix_fmt(avctx->sw_pix_fmt));

    choices = av_malloc_array(n + 1, sizeof(*choices));
    if (!choices)
        return AV_PIX_FMT_NONE;

    memcpy(choices, fmt, (n + 1) * sizeof(*choices));

    for (;;) {
        if (avctx->hwaccel && avctx->hwaccel->uninit)
            avctx->hwaccel->uninit(avctx);
        av_freep(&avctx->internal->hwaccel_priv_data);
        avctx->hwaccel = NULL;

        av_buffer_unref(&avctx->hw_frames_ctx);

        ret = avctx->get_format(avctx, choices);

        desc = av_pix_fmt_desc_get(ret);
        if (!desc) {
            ret = AV_PIX_FMT_NONE;
            break;
        }

        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        if (avctx->hw_frames_ctx) {
            AVHWFramesContext *hw_frames_ctx = (AVHWFramesContext *)avctx->hw_frames_ctx->data;
            if (hw_frames_ctx->format != ret) {
                av_log(avctx, AV_LOG_ERROR, "Format returned from get_buffer() "
                       "does not match the format of provided AVHWFramesContext\n");
                ret = AV_PIX_FMT_NONE;
                break;
            }
        }

        if (!setup_hwaccel(avctx, ret, desc->name))
            break;

        /* Remove failed hwaccel from choices */
        for (n = 0; choices[n] != ret; n++)
            av_assert0(choices[n] != AV_PIX_FMT_NONE);

        do
            choices[n] = choices[n + 1];
        while (choices[n++] != AV_PIX_FMT_NONE);
    }

    av_freep(&choices);
    return ret;
}

static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
{
    FramePool *pool = avctx->internal->pool;
    int i, ret;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO: {
        uint8_t *data[4];
        int linesize[4];
        int size[4] = { 0 };
        int w = frame->width;
        int h = frame->height;
        int tmpsize, unaligned;

        if (pool->format == frame->format &&
            pool->width == frame->width && pool->height == frame->height)
            return 0;

        avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);

        do {
            // NOTE: do not align linesizes individually, this breaks e.g. assumptions
            // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
            av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
            // increase alignment of w for next try (rhs gives the lowest bit set in w)
            w += w & ~(w - 1);

            unaligned = 0;
            for (i = 0; i < 4; i++)
                unaligned |= linesize[i] % pool->stride_align[i];
        } while (unaligned);

        tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
                                         NULL, linesize);
        if (tmpsize < 0)
            return -1;

        for (i = 0; i < 3 && data[i + 1]; i++)
            size[i] = data[i + 1] - data[i];
        size[i] = tmpsize - (data[i] - data[0]);

        for (i = 0; i < 4; i++) {
            av_buffer_pool_uninit(&pool->pools[i]);
            pool->linesize[i] = linesize[i];
            if (size[i]) {
                pool->pools[i] = av_buffer_pool_init(size[i] + 16, NULL);
                if (!pool->pools[i]) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
        }
        pool->format = frame->format;
        pool->width  = frame->width;
        pool->height = frame->height;

        break;
        }
    case AVMEDIA_TYPE_AUDIO: {
        int ch     = av_get_channel_layout_nb_channels(frame->channel_layout);
        int planar = av_sample_fmt_is_planar(frame->format);
        int planes = planar ? ch : 1;

        if (pool->format == frame->format && pool->planes == planes &&
            pool->channels == ch && frame->nb_samples == pool->samples)
            return 0;

        av_buffer_pool_uninit(&pool->pools[0]);
        ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
                                         frame->nb_samples, frame->format, 0);
        if (ret < 0)
            goto fail;

        pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
        if (!pool->pools[0]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        pool->format   = frame->format;
        pool->planes   = planes;
        pool->channels = ch;
        pool->samples  = frame->nb_samples;
        break;
        }
    default: av_assert0(0);
    }
    return 0;
fail:
    for (i = 0; i < 4; i++)
        av_buffer_pool_uninit(&pool->pools[i]);
    pool->format = -1;
    pool->planes = pool->channels = pool->samples = 0;
    pool->width  = pool->height = 0;
    return ret;
}

static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    FramePool *pool = avctx->internal->pool;
    int planes = pool->planes;
    int i;

    frame->linesize[0] = pool->linesize[0];

    if (planes > AV_NUM_DATA_POINTERS) {
        frame->extended_data = av_mallocz(planes * sizeof(*frame->extended_data));
        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
        frame->extended_buf    = av_mallocz(frame->nb_extended_buf *
                                            sizeof(*frame->extended_buf));
        if (!frame->extended_data || !frame->extended_buf) {
            av_freep(&frame->extended_data);
            av_freep(&frame->extended_buf);
            return AVERROR(ENOMEM);
        }
    } else
        frame->extended_data = frame->data;

    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
        frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
        if (!frame->buf[i])
            goto fail;
        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
        if (!frame->extended_buf[i])
            goto fail;
        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
    }

    if (avctx->debug & FF_DEBUG_BUFFERS)
        av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);

    return 0;
fail:
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}

static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
{
    FramePool *pool = s->internal->pool;
    int i;

    if (pic->data[0]) {
        av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n");
        return -1;
    }

    memset(pic->data, 0, sizeof(pic->data));
    pic->extended_data = pic->data;

    for (i = 0; i < 4 && pool->pools[i]; i++) {
        pic->linesize[i] = pool->linesize[i];

        pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
        if (!pic->buf[i])
            goto fail;

        pic->data[i] = pic->buf[i]->data;
    }
    for (; i < AV_NUM_DATA_POINTERS; i++) {
        pic->data[i]     = NULL;
        pic->linesize[i] = 0;
    }
    if (pic->data[1] && !pic->data[2])
        avpriv_set_systematic_pal2((uint32_t *)pic->data[1], s->pix_fmt);

    if (s->debug & FF_DEBUG_BUFFERS)
        av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);

    return 0;
fail:
    av_frame_unref(pic);
    return AVERROR(ENOMEM);
}

int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    int ret;

    if (avctx->hw_frames_ctx)
        return av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);

    if ((ret = update_frame_pool(avctx, frame)) < 0)
        return ret;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        return video_get_buffer(avctx, frame);
    case AVMEDIA_TYPE_AUDIO:
        return audio_get_buffer(avctx, frame);
    default:
        return -1;
    }
}

int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
{
    AVPacket *pkt = avctx->internal->pkt;
    int i;
    struct {
        enum AVPacketSideDataType packet;
        enum AVFrameSideDataType frame;
    } sd[] = {
        { AV_PKT_DATA_REPLAYGAIN,         AV_FRAME_DATA_REPLAYGAIN },
        { AV_PKT_DATA_DISPLAYMATRIX,      AV_FRAME_DATA_DISPLAYMATRIX },
        { AV_PKT_DATA_STEREO3D,           AV_FRAME_DATA_STEREO3D },
        { AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
    };

    frame->color_primaries = avctx->color_primaries;
    frame->color_trc       = avctx->color_trc;
    frame->colorspace      = avctx->colorspace;
    frame->color_range     = avctx->color_range;
    frame->chroma_location = avctx->chroma_sample_location;

    frame->reordered_opaque = avctx->reordered_opaque;
    if (!pkt) {
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
        frame->pkt_pts = AV_NOPTS_VALUE;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        frame->pts     = AV_NOPTS_VALUE;
        return 0;
    }

#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pts = pkt->pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->pts     = pkt->pts;

    for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
        int size;
        uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
        if (packet_sd) {
            AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
                                                               sd[i].frame,
                                                               size);
            if (!frame_sd)
                return AVERROR(ENOMEM);

            memcpy(frame_sd->data, packet_sd, size);
        }
    }

    return 0;
}

int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    const AVHWAccel *hwaccel = avctx->hwaccel;
    int override_dimensions = 1;
    int ret;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (frame->width <= 0 || frame->height <= 0) {
            frame->width  = FFMAX(avctx->width, avctx->coded_width);
            frame->height = FFMAX(avctx->height, avctx->coded_height);
            override_dimensions = 0;
        }
        if (frame->format < 0)
            frame->format = avctx->pix_fmt;
        if (!frame->sample_aspect_ratio.num)
            frame->sample_aspect_ratio = avctx->sample_aspect_ratio;

        if (av_image_check_sar(frame->width, frame->height,
                               frame->sample_aspect_ratio) < 0) {
            av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
                   frame->sample_aspect_ratio.num,
                   frame->sample_aspect_ratio.den);
            frame->sample_aspect_ratio = (AVRational){ 0, 1 };
        }

        if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
            return ret;
        break;
    case AVMEDIA_TYPE_AUDIO:
        if (!frame->sample_rate)
            frame->sample_rate = avctx->sample_rate;
        if (frame->format < 0)
            frame->format = avctx->sample_fmt;
        if (!frame->channel_layout) {
            if (avctx->channel_layout) {
                if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
                    avctx->channels) {
                    av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
                           "configuration.\n");
                    return AVERROR(EINVAL);
                }

                frame->channel_layout = avctx->channel_layout;
            } else {
                if (avctx->channels > FF_SANE_NB_CHANNELS) {
                    av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
                           avctx->channels);
                    return AVERROR(ENOSYS);
                }

                frame->channel_layout = av_get_default_channel_layout(avctx->channels);
                if (!frame->channel_layout)
                    frame->channel_layout = (1ULL << avctx->channels) - 1;
            }
        }
        break;
    default: return AVERROR(EINVAL);
    }

    ret = ff_decode_frame_props(avctx, frame);
    if (ret < 0)
        return ret;

    if (hwaccel) {
        if (hwaccel->alloc_frame) {
            ret = hwaccel->alloc_frame(avctx, frame);
            goto end;
        }
    } else
        avctx->sw_pix_fmt = avctx->pix_fmt;

    ret = avctx->get_buffer2(avctx, frame, flags);

end:
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions) {
        frame->width  = avctx->width;
        frame->height = avctx->height;
    }

    return ret;
}

int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    AVFrame *tmp;
    int ret;

    av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);

    if (!frame->data[0])
        return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);

    if (av_frame_is_writable(frame))
        return ff_decode_frame_props(avctx, frame);

    tmp = av_frame_alloc();
    if (!tmp)
        return AVERROR(ENOMEM);

    av_frame_move_ref(tmp, frame);

    ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0) {
        av_frame_free(&tmp);
        return ret;
    }

    av_frame_copy(frame, tmp);
    av_frame_free(&tmp);

    return 0;
}

void avcodec_flush_buffers(AVCodecContext *avctx)
{
    avctx->internal->draining      = 0;
    avctx->internal->draining_done = 0;
    av_frame_unref(avctx->internal->buffer_frame);
    av_packet_unref(avctx->internal->buffer_pkt);
    avctx->internal->buffer_pkt_valid = 0;

    if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
        ff_thread_flush(avctx);
    else if (avctx->codec->flush)
        avctx->codec->flush(avctx);

    if (!avctx->refcounted_frames)
        av_frame_unref(avctx->internal->to_free);
}
libavcodec/utils.c  @ 3fe2a01d

@@ -335,373 +335,6 @@ int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
     return ret;
 }
(removed here: update_frame_pool(), audio_get_buffer(), video_get_buffer(), avcodec_default_get_buffer2(), ff_decode_frame_props(), ff_get_buffer() and ff_reget_buffer(); the removed bodies are identical to the code added in libavcodec/decode.c above)
 int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),
                             void *arg, int *ret, int count, int size)
 {
     int i;

@@ -726,129 +359,6 @@ int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2,
     return 0;
 }
(removed here: is_hwaccel_pix_fmt(), avcodec_default_get_format(), find_hwaccel(), setup_hwaccel() and ff_get_format(); identical to the code added in libavcodec/decode.c above)
 int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
 {
     int ret = 0;

@@ -1210,244 +720,6 @@ FF_ENABLE_DEPRECATION_WARNINGS
         goto end;
     }
(removed here: apply_param_change(), unrefcount_frame(), avcodec_decode_video2(), avcodec_decode_audio4() and avcodec_decode_subtitle2(); identical to the code added in libavcodec/decode.c above)
 void avsubtitle_free(AVSubtitle *sub)
 {
     int i;

@@ -1467,150 +739,6 @@ void avsubtitle_free(AVSubtitle *sub)
     memset(sub, 0, sizeof(AVSubtitle));
 }
(removed here: do_decode(), avcodec_send_packet() and avcodec_receive_frame(); identical to the code added in libavcodec/decode.c above)
 av_cold int avcodec_close(AVCodecContext *avctx)
 {
     int i;

@@ -1953,23 +1081,6 @@ const char *avcodec_license(void)
     return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
 }
(removed here: avcodec_flush_buffers(); identical to the code added in libavcodec/decode.c above)
 int av_get_exact_bits_per_sample(enum AVCodecID codec_id)
 {
     switch (codec_id) {