Commit a54ea19a authored by Mike Melanson

VP3 post-processing loop filter; disabled until the correct final step is determined

Originally committed as revision 3996 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 9c7fb608
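
For orientation before the diff: the new code filters across the block edges of coded fragments with a 4-tap kernel and clamps the correction through a bounding-value table. Below is a minimal standalone sketch of that per-pixel step; it is not code from the commit, and the names edge_filter_sketch, clamp_u8 and the identity bound[] table are illustrative assumptions.

#include <stdio.h>

/* Sketch only: filter one pixel pair straddling a block edge with the
 * 4-tap kernel used in the diff below, then saturate the results to 0..255. */
static int clamp_u8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : v;
}

static void edge_filter_sketch(unsigned char *p, const int *bound)
{
    /* p[-2], p[-1] sit on one side of the edge; p[0], p[1] on the other */
    int delta = p[-2] - 3 * p[-1] + 3 * p[0] - p[1];
    delta = bound[(delta + 4) >> 3];        /* rounded /8, then table lookup */
    p[-1] = clamp_u8(p[-1] + delta);
    p[0]  = clamp_u8(p[0]  - delta);
}

int main(void)
{
    int bound[256];
    unsigned char row[4] = { 40, 60, 200, 220 };   /* hard edge between 60 and 200 */

    for (int i = 0; i < 256; i++)       /* identity table just for this demo */
        bound[i] = i;
    edge_filter_sketch(row + 2, bound);
    printf("%d %d %d %d\n", row[0], row[1], row[2], row[3]);   /* 40 90 170 220 */
    return 0;
}

With a permissive table the filter pulls the two pixels nearest the edge toward each other; the committed bounding_values[] table instead tapers the correction to zero for large deltas, so genuine image edges are left alone.
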
@@ -2219,7 +2219,143 @@ av_log(s->avctx, AV_LOG_ERROR, " help! got beefy vector! (%X, %X)\n", motion_x,
}
emms_c();
}
#define SATURATE_U8(x) (((x) < 0) ? 0 : ((x) > 255) ? 255 : (x))
static void horizontal_filter(unsigned char *first_pixel, int stride,
int *bounding_values)
{
int i;
int filter_value;
for (i = 0; i < 8; i++, first_pixel += stride) {
filter_value =
(first_pixel[-2] * 1) -
(first_pixel[-1] * 3) +
(first_pixel[ 0] * 3) -
(first_pixel[ 1] * 1);
filter_value = bounding_values[(filter_value + 4) >> 3];
first_pixel[-1] = SATURATE_U8(first_pixel[-1] + filter_value);
first_pixel[ 0] = SATURATE_U8(first_pixel[ 0] - filter_value);
}
}
static void vertical_filter(unsigned char *first_pixel, int stride,
int *bounding_values)
{
int i;
int filter_value;
for (i = 0; i < 8; i++, first_pixel++) {
filter_value =
(first_pixel[-(2 * stride)] * 1) -
(first_pixel[-(1 * stride)] * 3) +
(first_pixel[ (0 )] * 3) -
(first_pixel[ (1 * stride)] * 1);
filter_value = bounding_values[(filter_value + 4) >> 3];
first_pixel[-(1 * stride)] = SATURATE_U8(first_pixel[-(1 * stride)] + filter_value);
first_pixel[0] = SATURATE_U8(first_pixel[0] - filter_value);
}
}
static void apply_loop_filter(Vp3DecodeContext *s)
{
int x, y, plane;
int width, height;
int fragment;
int stride;
unsigned char *plane_data;
int bounding_values[256];
int filter_limit;
/* find the right loop limit value */
for (x = 63; x >= 0; x--) {
if (vp31_ac_scale_factor[x] >= s->quality_index)
break;
}
filter_limit = vp31_filter_limit_values[x];
/* set up the bounding values */
memset(bounding_values, 0, 256 * sizeof(int));
for (x = 0; x < filter_limit; x++) {
bounding_values[-x - filter_limit] = -filter_limit + x;
bounding_values[-x] = -x;
bounding_values[x] = x;
bounding_values[x + filter_limit] = filter_limit - x;
}
for (plane = 0; plane < 3; plane++) {
if (plane == 0) {
/* Y plane parameters */
fragment = 0;
width = s->fragment_width;
height = s->fragment_height;
stride = s->current_frame.linesize[0];
plane_data = s->current_frame.data[0];
} else if (plane == 1) {
/* U plane parameters */
fragment = s->u_fragment_start;
width = s->fragment_width / 2;
height = s->fragment_height / 2;
stride = s->current_frame.linesize[1];
plane_data = s->current_frame.data[1];
} else {
/* V plane parameters */
fragment = s->v_fragment_start;
width = s->fragment_width / 2;
height = s->fragment_height / 2;
stride = s->current_frame.linesize[2];
plane_data = s->current_frame.data[2];
}
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
/* do not perform left edge filter for left column fragments */
if ((x > 0) &&
(s->all_fragments[fragment].coding_method != MODE_COPY)) {
horizontal_filter(
plane_data + s->all_fragments[fragment].first_pixel,
stride, bounding_values);
}
/* do not perform top edge filter for top row fragments */
if ((y > 0) &&
(s->all_fragments[fragment].coding_method != MODE_COPY)) {
vertical_filter(
plane_data + s->all_fragments[fragment].first_pixel,
stride, bounding_values);
}
/* do not perform right edge filter for right column
* fragments or if right fragment neighbor is also coded
* in this frame (it will be filtered in the next iteration) */
if ((x < width - 1) &&
(s->all_fragments[fragment].coding_method != MODE_COPY) &&
(s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
horizontal_filter(
plane_data + s->all_fragments[fragment + 1].first_pixel,
stride, bounding_values);
}
/* do not perform bottom edge filter for bottom row
* fragments or if bottom fragment neighbor is also coded
* in this frame (it will be filtered in the next row) */
if ((y < height - 1) &&
(s->all_fragments[fragment].coding_method != MODE_COPY) &&
(s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
vertical_filter(
plane_data + s->all_fragments[fragment + width].first_pixel,
stride, bounding_values);
}
fragment++;
}
}
}
}
/*
@@ -2601,6 +2737,7 @@ if (!s->keyframe) {
reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);
render_fragments(s, 0, s->width, s->height, 0);
// apply_loop_filter(s);
if ((avctx->flags & CODEC_FLAG_GRAY) == 0) {
reverse_dc_prediction(s, s->u_fragment_start,
......
@@ -61,6 +61,17 @@ static const uint32_t vp31_ac_scale_factor[64] =
21, 19, 18, 17, 15, 13, 12, 10
};
static const uint32_t vp31_filter_limit_values[64] =
{ 30, 25, 20, 20, 15, 15, 14, 14,
13, 13, 12, 12, 11, 11, 10, 10,
9, 9, 8, 8, 7, 7, 7, 7,
6, 6, 6, 6, 5, 5, 5, 5,
4, 4, 4, 4, 3, 3, 3, 3,
2, 2, 2, 2, 2, 2, 2, 2,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
};
/* table used to convert natural order <-> zigzag order */
static const int dezigzag_index[64] =
{ 0, 1, 8, 16, 9, 2, 3, 10,
......
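
The bounding-value setup in apply_loop_filter() writes at negative offsets, which appears to assume a table centered on index zero (the (filter_value + 4) >> 3 lookup can also go negative). Below is a standalone sketch of the resulting clamp shape using centered storage via an offset pointer; that storage arrangement is an assumption for illustration, not how the committed code stores the table.

#include <stdio.h>
#include <string.h>

int main(void)
{
    int filter_limit = 4;                    /* illustrative limit value */
    int storage[256];
    int *bounding_values = storage + 128;    /* center index 0 in the buffer */

    memset(storage, 0, sizeof(storage));
    for (int x = 0; x < filter_limit; x++) {
        bounding_values[-x - filter_limit] = -filter_limit + x;
        bounding_values[-x] = -x;
        bounding_values[x] = x;
        bounding_values[x + filter_limit] = filter_limit - x;
    }

    /* prints a ramp that rises to +/-filter_limit and tapers back to 0,
     * so small edge deltas are smoothed and large ones are left untouched */
    for (int i = -2 * filter_limit; i <= 2 * filter_limit; i++)
        printf("%3d ", bounding_values[i]);
    printf("\n");
    return 0;
}

With filter_limit 4 this prints 0 -1 -2 -3 -4 -3 -2 -1 0 1 2 3 4 3 2 1 0.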