/*
 * MagicYUV decoder
 * Copyright (c) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>

#define CACHED_BITSTREAM_READER !ARCH_X86_32

#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"

#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "decode.h"
#include "get_bits.h"
#include "lossless_videodsp.h"
#include "thread.h"

#define VLC_BITS 12

typedef struct Slice {
    uint32_t start;
    uint32_t size;
} Slice;

typedef enum Prediction {
    LEFT = 1,
    GRADIENT,
    MEDIAN,
} Prediction;

typedef struct HuffEntry {
    uint8_t  len;
    uint16_t sym;
} HuffEntry;

typedef struct MagicYUVContext {
    AVFrame          *p;
    int               max;
    int               bps;
    int               slice_height;
    int               nb_slices;
    int               planes;         // number of encoded planes in bitstream
    int               decorrelate;    // postprocessing work
    int               color_matrix;   // video color matrix
    int               flags;
    int               interlaced;     // video is interlaced
    const uint8_t    *buf;            // pointer to AVPacket->data
    int               hshift[4];
    int               vshift[4];
    Slice            *slices[4];      // slice bitstream positions for each plane
    unsigned int      slices_size[4]; // slice sizes for each plane
    VLC               vlc[4];         // VLC for each plane
    VLC_MULTI         multi[4];       // Buffer for joint VLC data
    int (*magy_decode_slice)(AVCodecContext *avctx, void *tdata,
                             int j, int threadnr);
    LLVidDSPContext   llviddsp;
    HuffEntry         he[1 << 14];
    uint8_t           len[1 << 14];
} MagicYUVContext;

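/*
 * Build the decoding tables for one plane from the code lengths collected in
 * build_huffman().  On entry codes_pos[] holds the number of codes of each
 * length; converting it to a cumulative count lets the symbols be bucketed
 * into a canonical HuffEntry table sorted by descending code length, from
 * which ff_vlc_init_multi_from_lengths() builds both a single-symbol VLC and
 * a multi-symbol VLC used by the fast path of READ_PLANE below.
 */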
static int huff_build(AVCodecContext *avctx,
                      const uint8_t len[], uint16_t codes_pos[33],
                      VLC *vlc, VLC_MULTI *multi, int nb_elems, void *logctx)
{
    MagicYUVContext *s = avctx->priv_data;
    HuffEntry *he = s->he;

    for (int i = 31; i > 0; i--)
        codes_pos[i] += codes_pos[i + 1];

    for (unsigned i = nb_elems; i-- > 0;)
        he[--codes_pos[len[i]]] = (HuffEntry){ len[i], i };

    ff_vlc_free(vlc);
    ff_vlc_free_multi(multi);
    return ff_vlc_init_multi_from_lengths(vlc, multi, FFMIN(he[0].len, VLC_BITS), nb_elems, nb_elems,
                                          &he[0].len, sizeof(he[0]),
                                          &he[0].sym, sizeof(he[0]), sizeof(he[0].sym),
                                          0, 0, logctx);
}

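/*
 * Median predictor for 16-bit samples, the >8-bit counterpart of LLVidDSP's
 * add_median_pred(): each output sample is the median of the left neighbour,
 * the top neighbour and their gradient estimate, plus the coded difference,
 * masked with `max` to stay within the plane's bit depth.
 */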
static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1,
                                   const uint16_t *diff, intptr_t w,
                                   int *left, int *left_top, int max)
{
    int i;
    uint16_t l, lt;

    l  = *left;
    lt = *left_top;

    for (i = 0; i < w; i++) {
        l      = mid_pred(l, src1[i], (l + src1[i] - lt)) + diff[i];
        l     &= max;
        lt     = src1[i];
        dst[i] = l;
    }

    *left     = l;
    *left_top = lt;
}

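/*
 * Decode one row of Huffman-coded samples into dst and advance it by one
 * line.  With the cached bitstream reader the first loop pulls several
 * symbols per call through get_vlc_multi(); the remaining samples (and the
 * whole row when CACHED_BITSTREAM_READER is 0, i.e. on 32-bit x86) are read
 * one at a time with get_vlc2().  `b` is the sample size in bytes and `c`
 * the margin kept before the row end, presumably so a multi-symbol write
 * cannot overrun the row.
 */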
#define READ_PLANE(dst, plane, b, c) \
{ \
    x = 0; \
    for (; CACHED_BITSTREAM_READER && x < width-c && get_bits_left(&gb) > 0;) {\
        ret = get_vlc_multi(&gb, (uint8_t *)dst + x * b, multi, \
                            vlc, vlc_bits, 3, b); \
        if (ret <= 0) \
            return AVERROR_INVALIDDATA; \
        x += ret; \
    } \
    for (; x < width && get_bits_left(&gb) > 0; x++) \
        dst[x] = get_vlc2(&gb, vlc, vlc_bits, 3); \
    dst += stride; \
}

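/*
 * Decode slice j of a 10/12/14-bit frame.  Each plane's slice begins with a
 * flags byte and a prediction byte: flags bit 0 selects raw fixed-width
 * samples instead of Huffman codes, and the prediction byte chooses LEFT,
 * GRADIENT or MEDIAN reconstruction.  For interlaced content the first two
 * rows act as seeds and prediction uses a doubled stride.  For the GBR
 * formats the green plane is added back into red and blue afterwards,
 * undoing the encoder's decorrelation.
 */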
static int magy_decode_slice10(AVCodecContext *avctx, void *tdata,
                               int j, int threadnr)
{
    const MagicYUVContext *s = avctx->priv_data;
    int interlaced = s->interlaced;
    const int bps = s->bps;
    const int max = s->max - 1;
    AVFrame *p = s->p;
    int i, k, x;
    GetBitContext gb;
    uint16_t *dst;

    for (i = 0; i < s->planes; i++) {
        int left, lefttop, top;
        int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
        int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
        int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
        ptrdiff_t fake_stride = (p->linesize[i] / 2) * (1 + interlaced);
        ptrdiff_t stride = p->linesize[i] / 2;
        const VLC_MULTI_ELEM *const multi = s->multi[i].table;
        const VLCElem *const vlc = s->vlc[i].table;
        const int vlc_bits = s->vlc[i].bits;
        int flags, pred;
        int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
                                 s->slices[i][j].size);

        if (ret < 0)
            return ret;

        flags = get_bits(&gb, 8);
        pred  = get_bits(&gb, 8);

        dst = (uint16_t *)p->data[i] + j * sheight * stride;
        if (flags & 1) {
            if (get_bits_left(&gb) < bps * width * height)
                return AVERROR_INVALIDDATA;
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++)
                    dst[x] = get_bits(&gb, bps);

                dst += stride;
            }
        } else {
            for (k = 0; k < height; k++)
                READ_PLANE(dst, i, 2, 3)
        }

        switch (pred) {
        case LEFT:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, dst[-fake_stride]);
                dst += stride;
            }
            break;
        case GRADIENT:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                top = dst[-fake_stride];
                left = top + dst[0];
                dst[0] = left & max;
                for (x = 1; x < width; x++) {
                    top = dst[x - fake_stride];
                    lefttop = dst[x - (fake_stride + 1)];
                    left += top - lefttop + dst[x];
                    dst[x] = left & max;
                }
                dst += stride;
            }
            break;
        case MEDIAN:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            lefttop = left = dst[0];
            for (k = 1 + interlaced; k < height; k++) {
                magicyuv_median_pred16(dst, dst - fake_stride, dst, width, &left, &lefttop, max);
                lefttop = left = dst[0];
                dst += stride;
            }
            break;
        default:
            avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
        }
    }

    if (s->decorrelate) {
        int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
        int width = avctx->coded_width;
        uint16_t *r = (uint16_t *)p->data[0] + j * s->slice_height * p->linesize[0] / 2;
        uint16_t *g = (uint16_t *)p->data[1] + j * s->slice_height * p->linesize[1] / 2;
        uint16_t *b = (uint16_t *)p->data[2] + j * s->slice_height * p->linesize[2] / 2;

        for (i = 0; i < height; i++) {
            for (k = 0; k < width; k++) {
                b[k] = (b[k] + g[k]) & max;
                r[k] = (r[k] + g[k]) & max;
            }
            b += p->linesize[0] / 2;
            g += p->linesize[1] / 2;
            r += p->linesize[2] / 2;
        }
    }

    return 0;
}

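/*
 * 8-bit variant of magy_decode_slice10().  The slice layout is identical
 * (flags byte, prediction byte, then raw or Huffman-coded samples), but the
 * raw path is a plain byte copy and reconstruction uses the LLVidDSP
 * helpers; in the GRADIENT case the first 32 columns are done in C and the
 * rest handed to add_gradient_pred(), which apparently expects 32-byte
 * aligned input (see the comment in the loop).
 */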
static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
                             int j, int threadnr)
{
    const MagicYUVContext *s = avctx->priv_data;
    int interlaced = s->interlaced;
    AVFrame *p = s->p;
    int i, k, x, min_width;
    GetBitContext gb;
    uint8_t *dst;

    for (i = 0; i < s->planes; i++) {
        int left, lefttop, top;
        int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
        int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
        int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
        ptrdiff_t fake_stride = p->linesize[i] * (1 + interlaced);
        ptrdiff_t stride = p->linesize[i];
        const uint8_t *slice = s->buf + s->slices[i][j].start;
        const VLC_MULTI_ELEM *const multi = s->multi[i].table;
        const VLCElem *const vlc = s->vlc[i].table;
        const int vlc_bits = s->vlc[i].bits;
        int flags, pred;

        flags = bytestream_get_byte(&slice);
        pred  = bytestream_get_byte(&slice);

        dst = p->data[i] + j * sheight * stride;
        if (flags & 1) {
            if (s->slices[i][j].size - 2 < width * height)
                return AVERROR_INVALIDDATA;
            for (k = 0; k < height; k++) {
                bytestream_get_buffer(&slice, dst, width);
                dst += stride;
            }
        } else {
            int ret = init_get_bits8(&gb, slice, s->slices[i][j].size - 2);

            if (ret < 0)
                return ret;

            for (k = 0; k < height; k++)
                READ_PLANE(dst, i, 1, 7)
        }

        switch (pred) {
        case LEFT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_left_pred(dst, dst, width, dst[-fake_stride]);
                dst += stride;
            }
            break;
        case GRADIENT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            min_width = FFMIN(width, 32);
            for (k = 1 + interlaced; k < height; k++) {
                top = dst[-fake_stride];
                left = top + dst[0];
                dst[0] = left;
                for (x = 1; x < min_width; x++) { /* dsp need aligned 32 */
                    top = dst[x - fake_stride];
                    lefttop = dst[x - (fake_stride + 1)];
                    left += top - lefttop + dst[x];
                    dst[x] = left;
                }
                if (width > 32)
                    s->llviddsp.add_gradient_pred(dst + 32, fake_stride, width - 32);
                dst += stride;
            }
            break;
        case MEDIAN:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            lefttop = left = dst[0];
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_median_pred(dst, dst - fake_stride,
                                            dst, width, &left, &lefttop);
                lefttop = left = dst[0];
                dst += stride;
            }
            break;
        default:
            avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
        }
    }

    if (s->decorrelate) {
        int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
        int width = avctx->coded_width;
        uint8_t *b = p->data[0] + j * s->slice_height * p->linesize[0];
        uint8_t *g = p->data[1] + j * s->slice_height * p->linesize[1];
        uint8_t *r = p->data[2] + j * s->slice_height * p->linesize[2];

        for (i = 0; i < height; i++) {
            s->llviddsp.add_bytes(b, g, width);
            s->llviddsp.add_bytes(r, g, width);
            b += p->linesize[0];
            g += p->linesize[1];
            r += p->linesize[2];
        }
    }

    return 0;
}

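/*
 * Parse the run-length coded table of Huffman code lengths that precedes
 * the slice data.  Each byte carries a 7-bit code length (1..32); if its top
 * bit is set, the next byte extends the run.  Every time `max` symbols
 * (2^bps, one full plane alphabet) have been filled in, the tables for the
 * next plane are rebuilt via huff_build().
 */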
static int build_huffman(AVCodecContext *avctx, const uint8_t *table,
                         int table_size, int max)
{
    MagicYUVContext *s = avctx->priv_data;
    GetByteContext gb;
    uint8_t *len = s->len;
    uint16_t length_count[33] = { 0 };
    int i = 0, j = 0, k;

    bytestream2_init(&gb, table, table_size);

    while (bytestream2_get_bytes_left(&gb) > 0) {
        int b = bytestream2_peek_byteu(&gb) & 0x80;
        int x = bytestream2_get_byteu(&gb) & ~0x80;
        int l = 1;

        if (b) {
            if (bytestream2_get_bytes_left(&gb) <= 0)
                break;
            l += bytestream2_get_byteu(&gb);
        }
        k = j + l;
        if (k > max || x == 0 || x > 32) {
            av_log(avctx, AV_LOG_ERROR, "Invalid Huffman codes\n");
            return AVERROR_INVALIDDATA;
        }

        length_count[x] += l;
        for (; j < k; j++)
            len[j] = x;

        if (j == max) {
            j = 0;
            if (huff_build(avctx, len, length_count, &s->vlc[i], &s->multi[i], max, avctx)) {
                av_log(avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
                return AVERROR_INVALIDDATA;
            }
            i++;
            if (i == s->planes) {
                break;
            }
            memset(length_count, 0, sizeof(length_count));
        }
    }

    if (i != s->planes) {
        av_log(avctx, AV_LOG_ERROR, "Huffman tables too short\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

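/*
 * Top-level frame decoding.  As reconstructed from the parsing below, a
 * packet starts with a 'MAGY' fourcc and a header of at least 32 bytes
 * (version, pixel format byte, color matrix, flags, dimensions, slice
 * width/height), followed by per-plane tables of le32 slice offsets relative
 * to the end of the header, a plane-count byte and the run-length coded
 * Huffman length table.  Slices are decoded in parallel through execute2();
 * for the GBR pixel formats the first two planes are swapped afterwards,
 * presumably because the bitstream stores them in the opposite order.
 */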
static int magy_decode_frame(AVCodecContext *avctx, AVFrame *p,
                             int *got_frame, AVPacket *avpkt)
{
    MagicYUVContext *s = avctx->priv_data;
    GetByteContext gb;
    uint32_t first_offset, offset, next_offset, header_size, slice_width;
    int width, height, format, version, table_size;
    int ret, i, j;

    if (avpkt->size < 36)
        return AVERROR_INVALIDDATA;

    bytestream2_init(&gb, avpkt->data, avpkt->size);
    if (bytestream2_get_le32u(&gb) != MKTAG('M', 'A', 'G', 'Y'))
        return AVERROR_INVALIDDATA;

    header_size = bytestream2_get_le32u(&gb);
    if (header_size < 32 || header_size >= avpkt->size) {
        av_log(avctx, AV_LOG_ERROR,
               "header or packet too small %"PRIu32"\n", header_size);
        return AVERROR_INVALIDDATA;
    }

    version = bytestream2_get_byteu(&gb);
    if (version != 7) {
        avpriv_request_sample(avctx, "Version %d", version);
        return AVERROR_PATCHWELCOME;
    }

    s->hshift[1] =
    s->vshift[1] =
    s->hshift[2] =
    s->vshift[2] = 0;
    s->decorrelate = 0;
    s->bps = 8;

    format = bytestream2_get_byteu(&gb);
    switch (format) {
    case 0x65:
        avctx->pix_fmt = AV_PIX_FMT_GBRP;
        s->decorrelate = 1;
        break;
    case 0x66:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        s->decorrelate = 1;
        break;
    case 0x67:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        break;
    case 0x68:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        s->hshift[1] =
        s->hshift[2] = 1;
        break;
    case 0x69:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        s->hshift[1] =
        s->vshift[1] =
        s->hshift[2] =
        s->vshift[2] = 1;
        break;
    case 0x6a:
        avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
        break;
    case 0x6b:
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        break;
    case 0x6c:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
        s->hshift[1] =
        s->hshift[2] = 1;
        s->bps = 10;
        break;
    case 0x76:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
        s->bps = 10;
        break;
    case 0x6d:
        avctx->pix_fmt = AV_PIX_FMT_GBRP10;
        s->decorrelate = 1;
        s->bps = 10;
        break;
    case 0x6e:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
        s->decorrelate = 1;
        s->bps = 10;
        break;
    case 0x6f:
        avctx->pix_fmt = AV_PIX_FMT_GBRP12;
        s->decorrelate = 1;
        s->bps = 12;
        break;
    case 0x70:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
        s->decorrelate = 1;
        s->bps = 12;
        break;
    case 0x71:
        avctx->pix_fmt = AV_PIX_FMT_GBRP14;
        s->decorrelate = 1;
        s->bps = 14;
        break;
    case 0x72:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP14;
        s->decorrelate = 1;
        s->bps = 14;
        break;
    case 0x73:
        avctx->pix_fmt = AV_PIX_FMT_GRAY10;
        s->bps = 10;
        break;
    case 0x7b:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
        s->hshift[1] =
        s->vshift[1] =
        s->hshift[2] =
        s->vshift[2] = 1;
        s->bps = 10;
        break;
    default:
        avpriv_request_sample(avctx, "Format 0x%X", format);
        return AVERROR_PATCHWELCOME;
    }
    s->max = 1 << s->bps;
    s->magy_decode_slice = s->bps == 8 ? magy_decode_slice : magy_decode_slice10;
    s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);

    bytestream2_skipu(&gb, 1);
    s->color_matrix = bytestream2_get_byteu(&gb);
    s->flags = bytestream2_get_byteu(&gb);
    s->interlaced = !!(s->flags & 2);
    bytestream2_skipu(&gb, 3);

    width  = bytestream2_get_le32u(&gb);
    height = bytestream2_get_le32u(&gb);
    ret = ff_set_dimensions(avctx, width, height);
    if (ret < 0)
        return ret;

    slice_width = bytestream2_get_le32u(&gb);
    if (slice_width != avctx->coded_width) {
        avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
        return AVERROR_PATCHWELCOME;
    }
    s->slice_height = bytestream2_get_le32u(&gb);
    if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid slice height: %d\n", s->slice_height);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skipu(&gb, 4);

    s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
    if (s->nb_slices > INT_MAX / FFMAX(sizeof(Slice), 4 * 5)) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of slices: %d\n", s->nb_slices);
        return AVERROR_INVALIDDATA;
    }

    if (s->interlaced) {
        if ((s->slice_height >> s->vshift[1]) < 2) {
            av_log(avctx, AV_LOG_ERROR, "impossible slice height\n");
            return AVERROR_INVALIDDATA;
        }
        if ((avctx->coded_height % s->slice_height) && ((avctx->coded_height % s->slice_height) >> s->vshift[1]) < 2) {
            av_log(avctx, AV_LOG_ERROR, "impossible height\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (bytestream2_get_bytes_left(&gb) <= s->nb_slices * s->planes * 5)
        return AVERROR_INVALIDDATA;
    for (i = 0; i < s->planes; i++) {
        av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
        if (!s->slices[i])
            return AVERROR(ENOMEM);

        offset = bytestream2_get_le32u(&gb);
        if (offset >= avpkt->size - header_size)
            return AVERROR_INVALIDDATA;

        if (i == 0)
            first_offset = offset;

        for (j = 0; j < s->nb_slices - 1; j++) {
            s->slices[i][j].start = offset + header_size;

            next_offset = bytestream2_get_le32u(&gb);
            if (next_offset <= offset || next_offset >= avpkt->size - header_size)
                return AVERROR_INVALIDDATA;

            s->slices[i][j].size = next_offset - offset;
            if (s->slices[i][j].size < 2)
                return AVERROR_INVALIDDATA;
            offset = next_offset;
        }

        s->slices[i][j].start = offset + header_size;
        s->slices[i][j].size  = avpkt->size - s->slices[i][j].start;

        if (s->slices[i][j].size < 2)
            return AVERROR_INVALIDDATA;
    }

    if (bytestream2_get_byteu(&gb) != s->planes)
        return AVERROR_INVALIDDATA;

    bytestream2_skipu(&gb, s->nb_slices * s->planes);

    table_size = header_size + first_offset - bytestream2_tell(&gb);
    if (table_size < 2)
        return AVERROR_INVALIDDATA;

    ret = build_huffman(avctx, avpkt->data + bytestream2_tell(&gb),
                        table_size, s->max);
    if (ret < 0)
        return ret;

    p->pict_type = AV_PICTURE_TYPE_I;
    p->flags |= AV_FRAME_FLAG_KEY;

    if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
        return ret;

    s->buf = avpkt->data;
    s->p = p;
    avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);

    if (avctx->pix_fmt == AV_PIX_FMT_GBRP   ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP  ||
        avctx->pix_fmt == AV_PIX_FMT_GBRP10 ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP10||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP12||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP14||
        avctx->pix_fmt == AV_PIX_FMT_GBRP12 ||
        avctx->pix_fmt == AV_PIX_FMT_GBRP14) {
        FFSWAP(uint8_t*, p->data[0], p->data[1]);
        FFSWAP(int, p->linesize[0], p->linesize[1]);
    } else {
        switch (s->color_matrix) {
        case 1:
            p->colorspace = AVCOL_SPC_BT470BG;
            break;
        case 2:
            p->colorspace = AVCOL_SPC_BT709;
            break;
        }
        p->color_range = (s->flags & 4) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
    }

    *got_frame = 1;

    return avpkt->size;
}

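/* Decoder init only sets up the lossless-video DSP context; everything
 * format-dependent is derived per frame from the packet header. */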
static av_cold int magy_decode_init(AVCodecContext *avctx)
{
    MagicYUVContext *s = avctx->priv_data;
    ff_llviddsp_init(&s->llviddsp);
    return 0;
}

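/* Free the per-plane slice index arrays and the VLC tables. */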
static av_cold int magy_decode_end(AVCodecContext *avctx)
{
    MagicYUVContext * const s = avctx->priv_data;
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
        av_freep(&s->slices[i]);
        s->slices_size[i] = 0;
        ff_vlc_free(&s->vlc[i]);
        ff_vlc_free_multi(&s->multi[i]);
    }

    return 0;
}

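/* The decoder advertises frame and slice threading: slices within a frame
 * are decoded via execute2() above, and whole frames can be pipelined by the
 * generic frame-threading code. */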
const FFCodec ff_magicyuv_decoder = {
    .p.name           = "magicyuv",
    CODEC_LONG_NAME("MagicYUV video"),
    .p.type           = AVMEDIA_TYPE_VIDEO,
    .p.id             = AV_CODEC_ID_MAGICYUV,
    .priv_data_size   = sizeof(MagicYUVContext),
    .init             = magy_decode_init,
    .close            = magy_decode_end,
    FF_CODEC_DECODE_CB(magy_decode_frame),
    .p.capabilities   = AV_CODEC_CAP_DR1 |
                        AV_CODEC_CAP_FRAME_THREADS |
                        AV_CODEC_CAP_SLICE_THREADS,
};