Commit 5d774301 authored Aug 24, 2016 by Paul B Mahol
avfilter: add lut2 filter
parent 01aee814
Showing 6 changed files with 415 additions and 1 deletion
Changelog                  +1   -0
doc/filters.texi           +38  -0
libavfilter/Makefile       +1   -0
libavfilter/allfilters.c   +1   -0
libavfilter/version.h      +1   -1
libavfilter/vf_lut2.c      +373 -0
Changelog
...
...
@@ -20,6 +20,7 @@ version <next>:
- fifo muxer
- maskedclamp filter
- hysteresis filter
- lut2 filter
version 3.1:
...
...
doc/filters.texi
...
...
@@ -9253,6 +9253,44 @@ lutyuv=u='(val-maxval/2)*2+maxval/2':v='(val-maxval/2)*2+maxval/2'
@end example
@end itemize
@section lut2
Compute and apply a lookup table from two video inputs.
This filter accepts the following parameters:
@table @option
@item c0
set first pixel component expression
@item c1
set second pixel component expression
@item c2
set third pixel component expression
@item c3
set fourth pixel component expression, corresponds to the alpha component
@end table
Each of them specifies the expression to use for computing the lookup table for
the corresponding pixel component values.
The exact component associated with each of the @var{c*} options depends on the
format of the inputs.
The expressions can contain the following constants:
@table @option
@item w
@item h
The input width and height.
@item x
The first input value for the pixel component.
@item y
The second input value for the pixel component.
@end table
All expressions default to "x".
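
The following sketch is illustrative only and is not part of this commit's
documentation; it assumes the filter is fed two video inputs via
@option{-filter_complex} and averages the corresponding pixel components:

@example
ffmpeg -i INPUT1 -i INPUT2 -filter_complex "lut2=c0='(x+y)/2':c1='(x+y)/2':c2='(x+y)/2'" OUTPUT
@end example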
@section maskedclamp
Clamp the first input stream with the second input and third input stream.
...
...
libavfilter/Makefile
...
...
@@ -205,6 +205,7 @@ OBJS-$(CONFIG_KERNDEINT_FILTER)              += vf_kerndeint.o
OBJS-$(CONFIG_LENSCORRECTION_FILTER)         += vf_lenscorrection.o
OBJS-$(CONFIG_LOOP_FILTER)                   += f_loop.o
OBJS-$(CONFIG_LUT_FILTER)                    += vf_lut.o
OBJS-$(CONFIG_LUT2_FILTER)                   += vf_lut2.o framesync.o
OBJS-$(CONFIG_LUT3D_FILTER)                  += vf_lut3d.o
OBJS-$(CONFIG_LUTRGB_FILTER)                 += vf_lut.o
OBJS-$(CONFIG_LUTYUV_FILTER)                 += vf_lut.o
...
...
libavfilter/allfilters.c
...
...
@@ -222,6 +222,7 @@ void avfilter_register_all(void)
    REGISTER_FILTER(LENSCORRECTION, lenscorrection, vf);
    REGISTER_FILTER(LOOP,           loop,           vf);
    REGISTER_FILTER(LUT,            lut,            vf);
    REGISTER_FILTER(LUT2,           lut2,           vf);
    REGISTER_FILTER(LUT3D,          lut3d,          vf);
    REGISTER_FILTER(LUTRGB,         lutrgb,         vf);
    REGISTER_FILTER(LUTYUV,         lutyuv,         vf);
...
...
libavfilter/version.h
...
...
@@ -30,7 +30,7 @@
#include "libavutil/version.h"
#define LIBAVFILTER_VERSION_MAJOR   6
-#define LIBAVFILTER_VERSION_MINOR  54
+#define LIBAVFILTER_VERSION_MINOR  55
#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
...
...
libavfilter/vf_lut2.c
0 → 100644
/*
* Copyright (c) 2016 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "framesync.h"

static const char *const var_names[] = {
    "w",        ///< width of the input video
    "h",        ///< height of the input video
    "x",        ///< input value for the pixel from input #1
    "y",        ///< input value for the pixel from input #2
    NULL
};

enum var_name {
    VAR_W,
    VAR_H,
    VAR_X,
    VAR_Y,
    VAR_VARS_NB
};

typedef struct LUT2Context {
    const AVClass *class;

    char   *comp_expr_str[4];

    AVExpr *comp_expr[4];
    double var_values[VAR_VARS_NB];
    uint16_t *lut[4];  ///< lookup table for each component
    int width[4], height[4];
    int nb_planes;
    int depth, depthx, depthy;

    void (*lut2)(struct LUT2Context *s, AVFrame *dst, AVFrame *srcx, AVFrame *srcy);

    FFFrameSync fs;
} LUT2Context;

#define OFFSET(x) offsetof(LUT2Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption lut2_options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { NULL }
};

static av_cold void uninit(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    int i;

    for (i = 0; i < 4; i++) {
        av_expr_free(s->comp_expr[i]);
        s->comp_expr[i] = NULL;
        av_freep(&s->comp_expr_str[i]);
        av_freep(&s->lut[i]);
    }
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP12,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
    };

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}

static int config_inputx(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->height[0] = s->height[3] = inlink->h;
    s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->width[0]  = s->width[3]  = inlink->w;

    s->var_values[VAR_W] = inlink->w;
    s->var_values[VAR_H] = inlink->h;
    s->depthx = desc->comp[0].depth;

    return 0;
}

static int config_inputy(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    s->depthy = desc->comp[0].depth;

    return 0;
}

static void lut2_8bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
{
    int p, y, x;

    for (p = 0; p < s->nb_planes; p++) {
        const uint16_t *lut = s->lut[p];
        const uint8_t *srcxx, *srcyy;
        uint8_t *dst;

        dst   = out->data[p];
        srcxx = srcx->data[p];
        srcyy = srcy->data[p];

        for (y = 0; y < s->height[p]; y++) {
            for (x = 0; x < s->width[p]; x++) {
                dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
            }

            dst   += out->linesize[p];
            srcxx += srcx->linesize[p];
            srcyy += srcy->linesize[p];
        }
    }
}

static void lut2_16bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
{
    int p, y, x;

    for (p = 0; p < s->nb_planes; p++) {
        const uint16_t *lut = s->lut[p];
        const uint16_t *srcxx, *srcyy;
        uint16_t *dst;

        dst   = (uint16_t *)out->data[p];
        srcxx = (uint16_t *)srcx->data[p];
        srcyy = (uint16_t *)srcy->data[p];

        for (y = 0; y < s->height[p]; y++) {
            for (x = 0; x < s->width[p]; x++) {
                dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
            }

            dst   += out->linesize[p]  / 2;
            srcxx += srcx->linesize[p] / 2;
            srcyy += srcy->linesize[p] / 2;
        }
    }
}

static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    LUT2Context *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *srcx, *srcy;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &srcx, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &srcy, 0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(srcx);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, srcx);

        s->lut2(s, out, srcx, srcy);
    }

    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LUT2Context *s = ctx->priv;
    AVFilterLink *srcx = ctx->inputs[0];
    AVFilterLink *srcy = ctx->inputs[1];
    FFFrameSyncIn *in;
    int p, ret;

    s->depth = s->depthx + s->depthy;

    if (srcx->format != srcy->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (srcx->w                       != srcy->w ||
        srcx->h                       != srcy->h ||
        srcx->sample_aspect_ratio.num != srcy->sample_aspect_ratio.num ||
        srcx->sample_aspect_ratio.den != srcy->sample_aspect_ratio.den) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d, SAR %d:%d) do not match the corresponding "
               "second input link %s parameters (%dx%d, SAR %d:%d)\n",
               ctx->input_pads[0].name, srcx->w, srcx->h,
               srcx->sample_aspect_ratio.num,
               srcx->sample_aspect_ratio.den,
               ctx->input_pads[1].name,
               srcy->w, srcy->h,
               srcy->sample_aspect_ratio.num,
               srcy->sample_aspect_ratio.den);
        return AVERROR(EINVAL);
    }

    outlink->w = srcx->w;
    outlink->h = srcx->h;
    outlink->time_base = srcx->time_base;
    outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
    outlink->frame_rate = srcx->frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = srcx->time_base;
    in[1].time_base = srcy->time_base;
    in[0].sync   = 1;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;
    in[1].sync   = 1;
    in[1].before = EXT_STOP;
    in[1].after  = EXT_INFINITY;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

    s->lut2 = s->depth > 16 ? lut2_16bit : lut2_8bit;

    for (p = 0; p < s->nb_planes; p++) {
        s->lut[p] = av_malloc_array(1 << s->depth, sizeof(uint16_t));
        if (!s->lut[p])
            return AVERROR(ENOMEM);
    }

    for (p = 0; p < s->nb_planes; p++) {
        double res;
        int x, y;

        /* create the parsed expression */
        av_expr_free(s->comp_expr[p]);
        s->comp_expr[p] = NULL;
        ret = av_expr_parse(&s->comp_expr[p], s->comp_expr_str[p],
                            var_names, NULL, NULL, NULL, NULL, 0, ctx);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Error when parsing the expression '%s' for the component %d.\n",
                   s->comp_expr_str[p], p);
            return AVERROR(EINVAL);
        }

        /* compute the lut */
        for (y = 0; y < (1 << s->depthx); y++) {
            s->var_values[VAR_Y] = y;
            for (x = 0; x < (1 << s->depthx); x++) {
                s->var_values[VAR_X] = x;
                res = av_expr_eval(s->comp_expr[p], s->var_values, s);
                if (isnan(res)) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Error when evaluating the expression '%s' for the values %d and %d for the component %d.\n",
                           s->comp_expr_str[p], x, y, p);
                    return AVERROR(EINVAL);
                }

                s->lut[p][(y << s->depthx) + x] = res;
            }
        }
    }

    return ff_framesync_configure(&s->fs);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    LUT2Context *s = inlink->dst->priv;
    return ff_framesync_filter_frame(&s->fs, inlink, buf);
}

static int request_frame(AVFilterLink *outlink)
{
    LUT2Context *s = outlink->src->priv;
    return ff_framesync_request_frame(&s->fs, outlink);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "srcx",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_inputx,
    },
    {
        .name         = "srcy",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_inputy,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFILTER_DEFINE_CLASS(lut2);

AVFilter ff_vf_lut2 = {
    .name          = "lut2",
    .description   = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two video inputs."),
    .priv_size     = sizeof(LUT2Context),
    .priv_class    = &lut2_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
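
For reference, the heart of the new filter is the packed lookup: every pair of input component values (x, y) maps to a single flat table entry at index (y << depthx) | x, so the per-pixel work is one table load. The standalone sketch below illustrates that indexing scheme under an assumed 8-bit depth; it uses hypothetical values and is not part of the commit.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the lut2 table: one flat array indexed by (y << depthx) | x. */
int main(void)
{
    enum { DEPTHX = 8 };                       /* assume an 8-bit first input */
    static uint16_t lut[1 << (2 * DEPTHX)];    /* 256 * 256 entries           */
    int x, y;

    /* Fill the table with the expression "(x+y)/2" (average of the inputs). */
    for (y = 0; y < (1 << DEPTHX); y++)
        for (x = 0; x < (1 << DEPTHX); x++)
            lut[(y << DEPTHX) | x] = (uint16_t)((x + y) / 2);

    /* Applying the filter to one pixel pair is then a single table load. */
    printf("lut2(200, 100) = %u\n", (unsigned)lut[(100 << DEPTHX) | 200]);
    return 0;
}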