Commit c3e2e842 authored by Clément Bœsch

Merge commit 'd78fd2fa'

* commit 'd78fd2fa':
  Add MagicYUV decoder

Changes observed from Libav:
- many cosmetics (function renames/moves, spacing, line breaks)
- MagicYUVContext.slices_size is now unsigned
- use of pixdesc (include fixed in FFmpeg)
- mention of "Lossless" in the long name dropped (also removed from
  general.texi in FFmpeg)
- addition of the FF_CODEC_CAP_INIT_THREADSAFE cap
- use of qsort() instead of AV_QSORT() (NOT MERGED; see the sketch below)
- use of AVCodecContext.{width,height} instead of AVCodecContext.coded_{width,height} (NOT MERGED)

See also 77f9c4b7

Merged-by: Clément Bœsch <u@pkh.me>
parents a2c90d5f d78fd2fa
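
A note on the qsort()/AV_QSORT() item above: the comparator has the same shape in both cases, only the call site differs, because FFmpeg's AV_QSORT() macro (libavutil/qsort.h, an internal header only usable in an in-tree build) additionally takes the element type so the comparison can be inlined instead of going through a function pointer. The following is a minimal sketch, not part of the commit: the HuffEntry layout and comparator are copied from the diff below, while the main() driver and sample data are made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "libavutil/qsort.h" /* internal FFmpeg header: assumes an in-tree build */

typedef struct HuffEntry {
    uint8_t  sym;
    uint8_t  len;
    uint32_t code;
} HuffEntry;

/* Sort by code length first, then by symbol; the same comparator
 * works for both libc qsort() and AV_QSORT(). */
static int huff_cmp_len(const void *a, const void *b)
{
    const HuffEntry *aa = a, *bb = b;
    return (aa->len - bb->len) * 256 + aa->sym - bb->sym;
}

int main(void)
{
    HuffEntry he[4] = {
        { .sym = 2, .len = 3 }, { .sym = 1, .len = 1 },
        { .sym = 3, .len = 3 }, { .sym = 0, .len = 2 },
    };
    int i;

    /* Libav variant (not merged): comparator is called through a pointer. */
    qsort(he, 4, sizeof(*he), huff_cmp_len);

    /* FFmpeg variant kept by this merge: the macro also receives the
     * element type, so the comparator can be inlined at the call site. */
    AV_QSORT(he, 4, HuffEntry, huff_cmp_len);

    for (i = 0; i < 4; i++)
        printf("sym=%d len=%d\n", he[i].sym, he[i].len);
    return 0;
}

Either call leaves the table ordered by code length (then symbol), which is what the canonical code assignment in build_huff()/huff_build() in the diff below relies on.
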
doc/general.texi
@@ -745,7 +745,7 @@ following image formats are supported:
@item LucasArts SANM/Smush @tab @tab X
@tab Used in LucasArts games / SMUSH animations.
@item lossless MJPEG @tab X @tab X
@item MagicYUV Lossless Video @tab @tab X
@item MagicYUV Video @tab @tab X
@item Microsoft ATC Screen @tab @tab X
@tab Also known as Microsoft Screen 3.
@item Microsoft Expression Encoder Screen @tab @tab X
libavcodec/codec_desc.c
@@ -1546,7 +1546,7 @@ static const AVCodecDescriptor codec_descriptors[] = {
.id = AV_CODEC_ID_MAGICYUV,
.type = AVMEDIA_TYPE_VIDEO,
.name = "magicyuv",
.long_name = NULL_IF_CONFIG_SMALL("MagicYUV Lossless Video"),
.long_name = NULL_IF_CONFIG_SMALL("MagicYUV video"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
libavcodec/magicyuv.c
@@ -19,11 +19,12 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "libavutil/pixdesc.h"
#include "libavutil/qsort.h"
#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
@@ -42,43 +43,36 @@ typedef enum Prediction {
MEDIAN,
} Prediction;
typedef struct MagicYUVContext {
AVFrame *p;
int slice_height;
int nb_slices;
int planes; // number of encoded planes in bitstream
int decorrelate; // postprocessing work
int interlaced; // video is interlaced
uint8_t *buf; // pointer to AVPacket->data
int hshift[4];
int vshift[4];
Slice *slices[4]; // slice positions and size in bitstream for each plane
int slices_size[4];
uint8_t len[4][256]; // table of code lengths for each plane
VLC vlc[4]; // VLC for each plane
HuffYUVDSPContext hdsp;
} MagicYUVContext;
static av_cold int decode_init(AVCodecContext *avctx)
{
MagicYUVContext *s = avctx->priv_data;
ff_huffyuvdsp_init(&s->hdsp);
return 0;
}
typedef struct HuffEntry {
uint8_t sym;
uint8_t len;
uint32_t code;
} HuffEntry;
static int ff_magy_huff_cmp_len(const void *a, const void *b)
typedef struct MagicYUVContext {
AVFrame *p;
int slice_height;
int nb_slices;
int planes; // number of encoded planes in bitstream
int decorrelate; // postprocessing work
int interlaced; // video is interlaced
uint8_t *buf; // pointer to AVPacket->data
int hshift[4];
int vshift[4];
Slice *slices[4]; // slice bitstream positions for each plane
unsigned int slices_size[4]; // slice sizes for each plane
uint8_t len[4][256]; // table of code lengths for each plane
VLC vlc[4]; // VLC for each plane
HuffYUVDSPContext hdsp;
} MagicYUVContext;
static int huff_cmp_len(const void *a, const void *b)
{
const HuffEntry *aa = a, *bb = b;
return (aa->len - bb->len) * 256 + aa->sym - bb->sym;
}
static int build_huff(VLC *vlc, uint8_t *len)
static int huff_build(VLC *vlc, uint8_t *len)
{
HuffEntry he[256];
uint32_t codes[256];
@@ -91,7 +85,7 @@ static int build_huff(VLC *vlc, uint8_t *len)
he[i].sym = 255 - i;
he[i].len = len[i];
}
AV_QSORT(he, 256, HuffEntry, ff_magy_huff_cmp_len);
AV_QSORT(he, 256, HuffEntry, huff_cmp_len);
code = 1;
for (i = 255; i >= 0; i--) {
@@ -108,56 +102,60 @@ static int build_huff(VLC *vlc, uint8_t *len)
syms, sizeof(*syms), sizeof(*syms), 0);
}
static int decode_slice(AVCodecContext *avctx, void *tdata,
int j, int threadnr)
static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
int j, int threadnr)
{
MagicYUVContext *s = avctx->priv_data;
int interlaced = s->interlaced;
AVFrame *p = s->p;
int i, k, x, ret;
GetBitContext b;
int i, k, x;
GetBitContext gb;
uint8_t *dst;
for (i = 0; i < s->planes; i++) {
int left, lefttop, top;
int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
int fake_stride = p->linesize[i] * (1 + interlaced);
int stride = p->linesize[i];
ptrdiff_t fake_stride = p->linesize[i] * (1 + interlaced);
ptrdiff_t stride = p->linesize[i];
int flags, pred;
int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
s->slices[i][j].size);
if ((ret = init_get_bits8(&b, s->buf + s->slices[i][j].start, s->slices[i][j].size)) < 0)
if (ret < 0)
return ret;
flags = get_bits(&b, 8);
pred = get_bits(&b, 8);
flags = get_bits(&gb, 8);
pred = get_bits(&gb, 8);
dst = p->data[i] + j * sheight * stride;
if (flags & 1) {
for (k = 0; k < height; k++) {
for (x = 0; x < width; x++) {
dst[x] = get_bits(&b, 8);
}
for (x = 0; x < width; x++)
dst[x] = get_bits(&gb, 8);
dst += stride;
}
} else {
for (k = 0; k < height; k++) {
for (x = 0; x < width; x++) {
int pix;
if (get_bits_left(&b) <= 0) {
if (get_bits_left(&gb) <= 0)
return AVERROR_INVALIDDATA;
}
pix = get_vlc2(&b, s->vlc[i].table, s->vlc[i].bits, 3);
if (pix < 0) {
pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
if (pix < 0)
return AVERROR_INVALIDDATA;
}
dst[x] = 255 - pix;
}
dst += stride;
}
}
if (pred == LEFT) {
switch (pred) {
case LEFT:
dst = p->data[i] + j * sheight * stride;
s->hdsp.add_hfyu_left_pred(dst, dst, width, 0);
dst += stride;
@@ -169,9 +167,8 @@ static int decode_slice(AVCodecContext *avctx, void *tdata,
s->hdsp.add_hfyu_left_pred(dst, dst, width, dst[-fake_stride]);
dst += stride;
}
} else if (pred == GRADIENT) {
int left, lefttop, top;
break;
case GRADIENT:
dst = p->data[i] + j * sheight * stride;
s->hdsp.add_hfyu_left_pred(dst, dst, width, 0);
left = lefttop = 0;
@@ -193,9 +190,8 @@ static int decode_slice(AVCodecContext *avctx, void *tdata,
}
dst += stride;
}
} else if (pred == MEDIAN) {
int left, lefttop;
break;
case MEDIAN:
dst = p->data[i] + j * sheight * stride;
lefttop = left = dst[0];
s->hdsp.add_hfyu_left_pred(dst, dst, width, 0);
@@ -206,12 +202,14 @@ static int decode_slice(AVCodecContext *avctx, void *tdata,
dst += stride;
}
for (k = 1 + interlaced; k < height; k++) {
s->hdsp.add_hfyu_median_pred(dst, dst - fake_stride, dst, width, &left, &lefttop);
s->hdsp.add_hfyu_median_pred(dst, dst - fake_stride,
dst, width, &left, &lefttop);
lefttop = left = dst[0];
dst += stride;
}
} else {
avpriv_request_sample(avctx, "unknown prediction: %d", pred);
break;
default:
avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
}
}
@@ -234,102 +232,106 @@ static int decode_slice(AVCodecContext *avctx, void *tdata,
return 0;
}
static int decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
static int magy_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
uint32_t first_offset, offset, next_offset, header_size, slice_width;
int ret, format, version, table_size;
MagicYUVContext *s = avctx->priv_data;
ThreadFrame frame = { .f = data };
AVFrame *p = data;
GetByteContext gb;
GetBitContext b;
int i, j, k, width, height;
GetByteContext gbyte;
GetBitContext gbit;
uint32_t first_offset, offset, next_offset, header_size, slice_width;
int width, height, format, version, table_size;
int ret, i, j, k;
bytestream2_init(&gb, avpkt->data, avpkt->size);
if (bytestream2_get_le32(&gb) != MKTAG('M','A','G','Y'))
bytestream2_init(&gbyte, avpkt->data, avpkt->size);
if (bytestream2_get_le32(&gbyte) != MKTAG('M', 'A', 'G', 'Y'))
return AVERROR_INVALIDDATA;
header_size = bytestream2_get_le32(&gb);
if (header_size < 32 || header_size >= avpkt->size)
header_size = bytestream2_get_le32(&gbyte);
if (header_size < 32 || header_size >= avpkt->size) {
av_log(avctx, AV_LOG_ERROR,
"header or packet too small %"PRIu32"\n", header_size);
return AVERROR_INVALIDDATA;
}
version = bytestream2_get_byte(&gb);
version = bytestream2_get_byte(&gbyte);
if (version != 7) {
avpriv_request_sample(avctx, "unsupported version: %d", version);
avpriv_request_sample(avctx, "Version %d", version);
return AVERROR_PATCHWELCOME;
}
s->hshift[1] = s->vshift[1] = 0;
s->hshift[2] = s->vshift[2] = 0;
s->hshift[1] =
s->vshift[1] =
s->hshift[2] =
s->vshift[2] = 0;
s->decorrelate = 0;
format = bytestream2_get_byte(&gb);
format = bytestream2_get_byte(&gbyte);
switch (format) {
case 0x65:
avctx->pix_fmt = AV_PIX_FMT_GBRP;
s->decorrelate = 1;
s->planes = 3;
break;
case 0x66:
avctx->pix_fmt = AV_PIX_FMT_GBRAP;
s->decorrelate = 1;
s->planes = 4;
break;
case 0x67:
avctx->pix_fmt = AV_PIX_FMT_YUV444P;
s->planes = 3;
break;
case 0x68:
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
s->planes = 3;
s->hshift[1] = s->hshift[2] = 1;
s->hshift[1] =
s->hshift[2] = 1;
break;
case 0x69:
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
s->planes = 3;
s->hshift[1] = s->vshift[1] = 1;
s->hshift[2] = s->vshift[2] = 1;
s->hshift[1] =
s->vshift[1] =
s->hshift[2] =
s->vshift[2] = 1;
break;
case 0x6a:
avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
s->planes = 4;
break;
case 0x6b:
avctx->pix_fmt = AV_PIX_FMT_GRAY8;
s->planes = 1;
break;
default:
avpriv_request_sample(avctx, "unsupported format: 0x%X", format);
avpriv_request_sample(avctx, "Format 0x%X", format);
return AVERROR_PATCHWELCOME;
}
s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);
bytestream2_skip(&gb, 2);
s->interlaced = !!(bytestream2_get_byte(&gb) & 2);
bytestream2_skip(&gb, 3);
bytestream2_skip(&gbyte, 2);
s->interlaced = !!(bytestream2_get_byte(&gbyte) & 2);
bytestream2_skip(&gbyte, 3);
width = bytestream2_get_le32(&gb);
height = bytestream2_get_le32(&gb);
if ((ret = ff_set_dimensions(avctx, width, height)) < 0)
width = bytestream2_get_le32(&gbyte);
height = bytestream2_get_le32(&gbyte);
ret = ff_set_dimensions(avctx, width, height);
if (ret < 0)
return ret;
slice_width = bytestream2_get_le32(&gb);
slice_width = bytestream2_get_le32(&gbyte);
if (slice_width != avctx->coded_width) {
avpriv_request_sample(avctx, "unsupported slice width: %d", slice_width);
avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
return AVERROR_PATCHWELCOME;
}
s->slice_height = bytestream2_get_le32(&gb);
if ((s->slice_height <= 0) || (s->slice_height > INT_MAX - avctx->coded_height)) {
av_log(avctx, AV_LOG_ERROR, "invalid slice height: %d\n", s->slice_height);
s->slice_height = bytestream2_get_le32(&gbyte);
if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
av_log(avctx, AV_LOG_ERROR,
"invalid slice height: %d\n", s->slice_height);
return AVERROR_INVALIDDATA;
}
bytestream2_skip(&gb, 4);
bytestream2_skip(&gbyte, 4);
s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
if (s->nb_slices > INT_MAX / sizeof(Slice)) {
av_log(avctx, AV_LOG_ERROR, "invalid number of slices: %d\n", s->nb_slices);
av_log(avctx, AV_LOG_ERROR,
"invalid number of slices: %d\n", s->nb_slices);
return AVERROR_INVALIDDATA;
}
@@ -338,7 +340,7 @@ static int decode_frame(AVCodecContext *avctx,
if (!s->slices[i])
return AVERROR(ENOMEM);
offset = bytestream2_get_le32(&gb);
offset = bytestream2_get_le32(&gbyte);
if (offset >= avpkt->size - header_size)
return AVERROR_INVALIDDATA;
@@ -347,46 +349,47 @@ static int decode_frame(AVCodecContext *avctx,
for (j = 0; j < s->nb_slices - 1; j++) {
s->slices[i][j].start = offset + header_size;
next_offset = bytestream2_get_le32(&gb);
s->slices[i][j].size = next_offset - offset;
offset = next_offset;
if (offset >= avpkt->size - header_size)
next_offset = bytestream2_get_le32(&gbyte);
if (next_offset <= offset || next_offset >= avpkt->size - header_size)
return AVERROR_INVALIDDATA;
s->slices[i][j].size = next_offset - offset;
offset = next_offset;
}
s->slices[i][j].start = offset + header_size;
s->slices[i][j].size = avpkt->size - s->slices[i][j].start;
}
if (bytestream2_get_byte(&gb) != s->planes)
if (bytestream2_get_byte(&gbyte) != s->planes)
return AVERROR_INVALIDDATA;
bytestream2_skip(&gb, s->nb_slices * s->planes);
bytestream2_skip(&gbyte, s->nb_slices * s->planes);
table_size = header_size + first_offset - bytestream2_tell(&gb);
table_size = header_size + first_offset - bytestream2_tell(&gbyte);
if (table_size < 2)
return AVERROR_INVALIDDATA;
if ((ret = init_get_bits8(&b, avpkt->data + bytestream2_tell(&gb), table_size)) < 0)
ret = init_get_bits8(&gbit, avpkt->data + bytestream2_tell(&gbyte), table_size);
if (ret < 0)
return ret;
memset(s->len, 0, sizeof(s->len));
j = i = 0;
while (get_bits_left(&b) >= 8) {
int l = get_bits(&b, 4);
int x = get_bits(&b, 4);
int L = get_bitsz(&b, l) + 1;
while (get_bits_left(&gbit) >= 8) {
int b = get_bits(&gbit, 4);
int x = get_bits(&gbit, 4);
int l = get_bitsz(&gbit, b) + 1;
for (k = 0; k < L; k++) {
for (k = 0; k < l; k++)
if (j + k < 256)
s->len[i][j + k] = x;
}
j += L;
j += l;
if (j == 256) {
j = 0;
if (build_huff(&s->vlc[i], s->len[i])) {
if (huff_build(&s->vlc[i], s->len[i])) {
av_log(avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
return AVERROR_INVALIDDATA;
}
@@ -412,7 +415,7 @@ static int decode_frame(AVCodecContext *avctx,
s->buf = avpkt->data;
s->p = p;
avctx->execute2(avctx, decode_slice, NULL, NULL, s->nb_slices);
avctx->execute2(avctx, magy_decode_slice, NULL, NULL, s->nb_slices);
if (avctx->pix_fmt == AV_PIX_FMT_GBRP ||
avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
@@ -422,58 +425,57 @@ static int decode_frame(AVCodecContext *avctx,
*got_frame = 1;
if (ret < 0)
return ret;
return avpkt->size;
}
#if HAVE_THREADS
static int decode_init_thread_copy(AVCodecContext *avctx)
static int magy_init_thread_copy(AVCodecContext *avctx)
{
MagicYUVContext *s = avctx->priv_data;
int i;
s->slices[0] = 0;
s->slices[1] = 0;
s->slices[2] = 0;
s->slices[3] = 0;
s->slices_size[0] = 0;
s->slices_size[1] = 0;
s->slices_size[2] = 0;
s->slices_size[3] = 0;
for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
s->slices[i] = NULL;
s->slices_size[i] = 0;
}
return 0;
}
#endif
static av_cold int decode_end(AVCodecContext *avctx)
static av_cold int magy_decode_init(AVCodecContext *avctx)
{
MagicYUVContext *s = avctx->priv_data;
ff_huffyuvdsp_init(&s->hdsp);
return 0;
}
static av_cold int magy_decode_end(AVCodecContext *avctx)
{
MagicYUVContext * const s = avctx->priv_data;
int i;
av_freep(&s->slices[0]);
av_freep(&s->slices[1]);
av_freep(&s->slices[2]);
av_freep(&s->slices[3]);
s->slices_size[0] = 0;
s->slices_size[1] = 0;
s->slices_size[2] = 0;
s->slices_size[3] = 0;
ff_free_vlc(&s->vlc[0]);
ff_free_vlc(&s->vlc[1]);
ff_free_vlc(&s->vlc[2]);
ff_free_vlc(&s->vlc[3]);
for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
av_freep(&s->slices[i]);
s->slices_size[i] = 0;
ff_free_vlc(&s->vlc[i]);
}
return 0;
}
AVCodec ff_magicyuv_decoder = {
.name = "magicyuv",
.long_name = NULL_IF_CONFIG_SMALL("MagicYUV Lossless Video"),
.long_name = NULL_IF_CONFIG_SMALL("MagicYUV video"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_MAGICYUV,
.priv_data_size = sizeof(MagicYUVContext),
.init = decode_init,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
.close = decode_end,
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
.init = magy_decode_init,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(magy_init_thread_copy),
.close = magy_decode_end,
.decode = magy_decode_frame,
.capabilities = AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};