FFmpeg
qsvdec.c
1 /*
2  * Intel MediaSDK QSV codec-independent code
3  *
4  * copyright (c) 2013 Luca Barbato
5  * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include <stdint.h>
27 #include <string.h>
28 #include <sys/types.h>
29 
30 #include <mfxvideo.h>
31 
32 #include "libavutil/common.h"
33 #include "libavutil/fifo.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/hwcontext_qsv.h"
37 #include "libavutil/mem.h"
38 #include "libavutil/log.h"
39 #include "libavutil/opt.h"
40 #include "libavutil/pixfmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/imgutils.h"
43 #include "libavutil/film_grain_params.h"
44 #include "libavutil/mastering_display_metadata.h"
45 
46 #include "avcodec.h"
47 #include "codec_internal.h"
48 #include "internal.h"
49 #include "decode.h"
50 #include "hwconfig.h"
51 #include "qsv.h"
52 #include "qsv_internal.h"
53 #include "refstruct.h"
54 
55 #if QSV_ONEVPL
56 #include <mfxdispatcher.h>
57 #else
58 #define MFXUnload(a) do { } while(0)
59 #endif
60 
61 static const AVRational mfx_tb = { 1, 90000 };
62 
63 #define PTS_TO_MFX_PTS(pts, pts_tb) ((pts) == AV_NOPTS_VALUE ? \
64  MFX_TIMESTAMP_UNKNOWN : pts_tb.num ? \
65  av_rescale_q(pts, pts_tb, mfx_tb) : pts)
66 
67 #define MFX_PTS_TO_PTS(mfx_pts, pts_tb) ((mfx_pts) == MFX_TIMESTAMP_UNKNOWN ? \
68  AV_NOPTS_VALUE : pts_tb.num ? \
69  av_rescale_q(mfx_pts, mfx_tb, pts_tb) : mfx_pts)
70 
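/* [Editorial note, not part of the original qsvdec.c] A quick worked example of
 * the two macros above, assuming a packet timebase of 1/25: a pts of 50 is
 * 2 seconds, so PTS_TO_MFX_PTS(50, (AVRational){1, 25}) rescales it to
 * 2 * 90000 = 180000 ticks of the 90 kHz MFX clock, and
 * MFX_PTS_TO_PTS(180000, (AVRational){1, 25}) maps it back to 50.
 * AV_NOPTS_VALUE and MFX_TIMESTAMP_UNKNOWN translate into each other, and when
 * pts_tb.num is 0 the value is passed through unchanged. */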
71 typedef struct QSVAsyncFrame {
72  mfxSyncPoint *sync;
73  QSVFrame *frame;
74 } QSVAsyncFrame;
75 
76 typedef struct QSVContext {
77  // the session used for decoding
78  mfxSession session;
79  mfxVersion ver;
80 
81  // the session we allocated internally, in case the caller did not provide
82  // one
83  QSVSession internal_qs;
84 
85  QSVFramesContext frames_ctx;
86 
87  /**
88  * a linked list of frames currently being used by QSV
89  */
90  QSVFrame *work_frames;
91 
92  AVFifo *async_fifo;
93  int zero_consume_run;
94  int reinit_flag;
95 
96  enum AVPixelFormat orig_pix_fmt;
97  uint32_t fourcc;
98  mfxFrameInfo frame_info;
99  AVBufferPool *pool;
100  int suggest_pool_size;
101  int initialized;
102 
103  // options set by the caller
104  int async_depth;
105  int iopattern;
106  int gpu_copy;
107 
108  char *load_plugins;
109 
110  mfxExtBuffer **ext_buffers;
111  int nb_ext_buffers;
112 } QSVContext;
113 
114 static const AVCodecHWConfigInternal *const qsv_hw_configs[] = {
115  &(const AVCodecHWConfigInternal) {
116  .public = {
117  .pix_fmt = AV_PIX_FMT_QSV,
118  .methods = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
119  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX,
120  .device_type = AV_HWDEVICE_TYPE_QSV,
121  },
122  .hwaccel = NULL,
123  },
124  NULL
125 };
126 
127 static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame,
128  AVBufferPool *pool)
129 {
130  int ret = 0;
131 
132  ret = ff_decode_frame_props(avctx, frame);
133  if (ret < 0)
134  return ret;
135 
136  frame->width = avctx->width;
137  frame->height = avctx->height;
138 
139  switch (avctx->pix_fmt) {
140  case AV_PIX_FMT_NV12:
141  frame->linesize[0] = FFALIGN(avctx->width, 128);
142  break;
143  case AV_PIX_FMT_P010:
144  case AV_PIX_FMT_P012:
145  case AV_PIX_FMT_YUYV422:
146  frame->linesize[0] = 2 * FFALIGN(avctx->width, 128);
147  break;
148  case AV_PIX_FMT_Y210:
149  case AV_PIX_FMT_VUYX:
150  case AV_PIX_FMT_XV30:
151  case AV_PIX_FMT_Y212:
152  frame->linesize[0] = 4 * FFALIGN(avctx->width, 128);
153  break;
154  case AV_PIX_FMT_XV36:
155  frame->linesize[0] = 8 * FFALIGN(avctx->width, 128);
156  break;
157  default:
158  av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
159  return AVERROR(EINVAL);
160  }
161 
162  frame->buf[0] = av_buffer_pool_get(pool);
163  if (!frame->buf[0])
164  return AVERROR(ENOMEM);
165 
166  frame->data[0] = frame->buf[0]->data;
167  if (avctx->pix_fmt == AV_PIX_FMT_NV12 ||
168  avctx->pix_fmt == AV_PIX_FMT_P010 ||
169  avctx->pix_fmt == AV_PIX_FMT_P012) {
170  frame->linesize[1] = frame->linesize[0];
171  frame->data[1] = frame->data[0] +
172  frame->linesize[0] * FFALIGN(avctx->height, 64);
173  }
174 
175  ret = ff_attach_decode_data(frame);
176  if (ret < 0)
177  return ret;
178 
179  return 0;
180 }
181 
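/* [Editorial note, not part of the original qsvdec.c] qsv_init_session() below
 * picks the MFX session used for decoding, in this order of preference: a
 * session supplied by the caller via AVQSVContext, a session created from
 * hw_frames_ref, a session created from hw_device_ref, and finally a purely
 * internal session. In every case the runtime version is queried and
 * MFXVideoDECODE_Close() is called so the decoder starts uninitialized. */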
182 static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
183  AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
184 {
185  int ret;
186 
187  if (q->gpu_copy == MFX_GPUCOPY_ON &&
188  !(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) {
189  av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy "
190  "only works in system memory mode.\n");
191  q->gpu_copy = MFX_GPUCOPY_OFF;
192  }
193  if (session) {
194  q->session = session;
195  } else if (hw_frames_ref) {
196  if (q->internal_qs.session) {
197  MFXClose(q->internal_qs.session);
198  q->internal_qs.session = NULL;
199  }
200  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
201 
202  q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
203  if (!q->frames_ctx.hw_frames_ctx)
204  return AVERROR(ENOMEM);
205 
206  ret = ff_qsv_init_session_frames(avctx, &q->internal_qs.session,
207  &q->frames_ctx, q->load_plugins,
208 #if QSV_HAVE_OPAQUE
209  q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY,
210 #else
211  0,
212 #endif
213  q->gpu_copy);
214  if (ret < 0) {
215  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
216  return ret;
217  }
218 
219  q->session = q->internal_qs.session;
220  } else if (hw_device_ref) {
221  if (q->internal_qs.session) {
222  MFXClose(q->internal_qs.session);
223  q->internal_qs.session = NULL;
224  }
225 
226  ret = ff_qsv_init_session_device(avctx, &q->internal_qs.session,
227  hw_device_ref, q->load_plugins, q->gpu_copy);
228  if (ret < 0)
229  return ret;
230 
231  q->session = q->internal_qs.session;
232  } else {
233  if (!q->internal_qs.session) {
234  ret = ff_qsv_init_internal_session(avctx, &q->internal_qs,
235  q->load_plugins, q->gpu_copy);
236  if (ret < 0)
237  return ret;
238  }
239 
240  q->session = q->internal_qs.session;
241  }
242 
243  if (MFXQueryVersion(q->session, &q->ver) != MFX_ERR_NONE) {
244  av_log(avctx, AV_LOG_ERROR, "Error querying the session version. \n");
245  q->session = NULL;
246 
247  if (q->internal_qs.session) {
248  MFXClose(q->internal_qs.session);
249  q->internal_qs.session = NULL;
250  }
251 
252  if (q->internal_qs.loader) {
253  MFXUnload(q->internal_qs.loader);
254  q->internal_qs.loader = NULL;
255  }
256 
257  return AVERROR_EXTERNAL;
258  }
259 
260  /* make sure the decoder is uninitialized */
261  MFXVideoDECODE_Close(q->session);
262 
263  return 0;
264 }
265 
266 static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
267 {
268  mfxSession session = NULL;
269  int iopattern = 0;
270  int ret;
271  enum AVPixelFormat pix_fmts[3] = {
272  AV_PIX_FMT_QSV, /* opaque format in case of video memory output */
273  pix_fmt, /* system memory format obtained from bitstream parser */
274  AV_PIX_FMT_NONE };
275 
276  ret = ff_get_format(avctx, pix_fmts);
277  if (ret < 0) {
278  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
279  return ret;
280  }
281 
282  if (!q->async_fifo) {
283  q->async_fifo = av_fifo_alloc2(q->async_depth, sizeof(QSVAsyncFrame), 0);
284  if (!q->async_fifo)
285  return AVERROR(ENOMEM);
286  }
287 
288  if (avctx->pix_fmt == AV_PIX_FMT_QSV && avctx->hwaccel_context) {
289  AVQSVContext *user_ctx = avctx->hwaccel_context;
290  session = user_ctx->session;
291  iopattern = user_ctx->iopattern;
292  q->ext_buffers = user_ctx->ext_buffers;
293  q->nb_ext_buffers = user_ctx->nb_ext_buffers;
294  }
295 
296  if (avctx->hw_device_ctx && !avctx->hw_frames_ctx && ret == AV_PIX_FMT_QSV) {
297  AVHWFramesContext *hwframes_ctx;
298  AVQSVFramesContext *frames_hwctx;
299 
301 
302  if (!avctx->hw_frames_ctx) {
303  av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
304  return AVERROR(ENOMEM);
305  }
306 
307  hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
308  frames_hwctx = hwframes_ctx->hwctx;
309  hwframes_ctx->width = FFALIGN(avctx->coded_width, 32);
310  hwframes_ctx->height = FFALIGN(avctx->coded_height, 32);
311  hwframes_ctx->format = AV_PIX_FMT_QSV;
312  hwframes_ctx->sw_format = avctx->sw_pix_fmt;
313  hwframes_ctx->initial_pool_size = q->suggest_pool_size + 16 + avctx->extra_hw_frames;
314  frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
315 
316  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
317 
318  if (ret < 0) {
319  av_log(NULL, AV_LOG_ERROR, "Error initializing a QSV frame pool\n");
320  av_buffer_unref(&avctx->hw_frames_ctx);
321  return ret;
322  }
323  }
324 
325  if (avctx->hw_frames_ctx) {
326  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
327  AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
328 
329  if (!iopattern) {
330 #if QSV_HAVE_OPAQUE
331  if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
332  iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
333  else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
334  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
335 #else
336  if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
337  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
338 #endif
339  }
340  }
341 
342  if (!iopattern)
343  iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
344  q->iopattern = iopattern;
345 
346  ff_qsv_print_iopattern(avctx, q->iopattern, "Decoder");
347 
348  ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx, avctx->hw_device_ctx);
349  if (ret < 0) {
350  av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
351  return ret;
352  }
353 
354  param->IOPattern = q->iopattern;
355  param->AsyncDepth = q->async_depth;
356  param->ExtParam = q->ext_buffers;
357  param->NumExtParam = q->nb_ext_buffers;
358 
359  return 0;
360  }
361 
362 static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
363 {
364  int ret;
365 
366  avctx->width = param->mfx.FrameInfo.CropW;
367  avctx->height = param->mfx.FrameInfo.CropH;
368  avctx->coded_width = param->mfx.FrameInfo.Width;
369  avctx->coded_height = param->mfx.FrameInfo.Height;
370  avctx->level = param->mfx.CodecLevel;
371  avctx->profile = param->mfx.CodecProfile;
372  avctx->field_order = ff_qsv_map_picstruct(param->mfx.FrameInfo.PicStruct);
373  avctx->pix_fmt = ff_qsv_map_fourcc(param->mfx.FrameInfo.FourCC);
374 
375  ret = MFXVideoDECODE_Init(q->session, param);
376  if (ret < 0)
377  return ff_qsv_print_error(avctx, ret,
378  "Error initializing the MFX video decoder");
379 
380  q->frame_info = param->mfx.FrameInfo;
381 
382  if (!avctx->hw_frames_ctx)
383  q->pool = av_buffer_pool_init(av_image_get_buffer_size(avctx->pix_fmt,
384  FFALIGN(avctx->width, 128), FFALIGN(avctx->height, 64), 1), av_buffer_allocz);
385  return 0;
386 }
387 
388 static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q,
389  const AVPacket *avpkt, enum AVPixelFormat pix_fmt,
390  mfxVideoParam *param)
391 {
392  int ret;
393  mfxExtVideoSignalInfo video_signal_info = { 0 };
394  mfxExtBuffer *header_ext_params[1] = { (mfxExtBuffer *)&video_signal_info };
395  mfxBitstream bs = { 0 };
396 
397  if (avpkt->size) {
398  bs.Data = avpkt->data;
399  bs.DataLength = avpkt->size;
400  bs.MaxLength = bs.DataLength;
401  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
402  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
403  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
404  } else
405  return AVERROR_INVALIDDATA;
406 
407 
408  if(!q->session) {
409  ret = qsv_decode_preinit(avctx, q, pix_fmt, param);
410  if (ret < 0)
411  return ret;
412  }
413 
414  ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
415  if (ret < 0)
416  return ret;
417 
418  param->mfx.CodecId = ret;
419  video_signal_info.Header.BufferId = MFX_EXTBUFF_VIDEO_SIGNAL_INFO;
420  video_signal_info.Header.BufferSz = sizeof(video_signal_info);
421  // The SDK doesn't support other ext buffers when calling MFXVideoDECODE_DecodeHeader,
422  // so do not append this buffer to the existing buffer array
423  param->ExtParam = header_ext_params;
424  param->NumExtParam = 1;
425  ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, param);
426  if (MFX_ERR_MORE_DATA == ret) {
427  return AVERROR(EAGAIN);
428  }
429  if (ret < 0)
430  return ff_qsv_print_error(avctx, ret,
431  "Error decoding stream header");
432 
433  avctx->color_range = video_signal_info.VideoFullRange ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
434 
435  if (video_signal_info.ColourDescriptionPresent) {
436  avctx->color_primaries = video_signal_info.ColourPrimaries;
437  avctx->color_trc = video_signal_info.TransferCharacteristics;
438  avctx->colorspace = video_signal_info.MatrixCoefficients;
439  }
440 
441  param->ExtParam = q->ext_buffers;
442  param->NumExtParam = q->nb_ext_buffers;
443 
444  if (param->mfx.FrameInfo.FrameRateExtN == 0 || param->mfx.FrameInfo.FrameRateExtD == 0) {
445  param->mfx.FrameInfo.FrameRateExtN = 25;
446  param->mfx.FrameInfo.FrameRateExtD = 1;
447  }
448 
449 #if QSV_VERSION_ATLEAST(1, 34)
450  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) && avctx->codec_id == AV_CODEC_ID_AV1)
451  param->mfx.FilmGrain = (avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) ? 0 : param->mfx.FilmGrain;
452 #endif
453 
454  return 0;
455 }
456 
457 static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
458 {
459  int ret;
460 
461  if (q->pool)
462  ret = qsv_get_continuous_buffer(avctx, frame->frame, q->pool);
463  else
464  ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
465 
466  if (ret < 0)
467  return ret;
468 
469  if (frame->frame->format == AV_PIX_FMT_QSV) {
470  frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
471  } else {
472  ret = ff_qsv_map_frame_to_surface(frame->frame, &frame->surface);
473  if (ret < 0) {
474  av_log(avctx, AV_LOG_ERROR, "map frame to surface failed.\n");
475  return ret;
476  }
477  }
478 
479  frame->surface.Info = q->frame_info;
480 
481  if (q->frames_ctx.mids) {
482  ret = ff_qsv_find_surface_idx(&q->frames_ctx, frame);
483  if (ret < 0)
484  return ret;
485 
486  frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
487  }
488 
489  frame->surface.Data.ExtParam = frame->ext_param;
490  frame->surface.Data.NumExtParam = 0;
491  frame->num_ext_params = 0;
492  frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
493  frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
494  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->dec_info);
495 #if QSV_VERSION_ATLEAST(1, 34)
496  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) && avctx->codec_id == AV_CODEC_ID_AV1) {
497  frame->av1_film_grain_param.Header.BufferId = MFX_EXTBUFF_AV1_FILM_GRAIN_PARAM;
498  frame->av1_film_grain_param.Header.BufferSz = sizeof(frame->av1_film_grain_param);
499  frame->av1_film_grain_param.FilmGrainFlags = 0;
500  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->av1_film_grain_param);
501  }
502 #endif
503 
504 #if QSV_VERSION_ATLEAST(1, 35)
505  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 35) && avctx->codec_id == AV_CODEC_ID_HEVC) {
506  frame->mdcv.Header.BufferId = MFX_EXTBUFF_MASTERING_DISPLAY_COLOUR_VOLUME;
507  frame->mdcv.Header.BufferSz = sizeof(frame->mdcv);
508  // The data in mdcv is valid when this flag is 1
509  frame->mdcv.InsertPayloadToggle = 0;
510  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->mdcv);
511 
512  frame->clli.Header.BufferId = MFX_EXTBUFF_CONTENT_LIGHT_LEVEL_INFO;
513  frame->clli.Header.BufferSz = sizeof(frame->clli);
514  // The data in clli is valid when this flag is 1
515  frame->clli.InsertPayloadToggle = 0;
516  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->clli);
517  }
518 #endif
519 
520  frame->used = 1;
521 
522  return 0;
523 }
524 
525 static void qsv_clear_unused_frames(QSVContext *q)
526 {
527  QSVFrame *cur = q->work_frames;
528  while (cur) {
529  if (cur->used && !cur->surface.Data.Locked && !cur->queued) {
530  cur->used = 0;
531  av_frame_unref(cur->frame);
532  }
533  cur = cur->next;
534  }
535 }
536 
537 static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
538 {
539  QSVFrame *frame, **last;
540  int ret;
541 
542  qsv_clear_unused_frames(q);
543 
544  frame = q->work_frames;
545  last = &q->work_frames;
546  while (frame) {
547  if (!frame->used) {
548  ret = alloc_frame(avctx, q, frame);
549  if (ret < 0)
550  return ret;
551  *surf = &frame->surface;
552  return 0;
553  }
554 
555  last = &frame->next;
556  frame = frame->next;
557  }
558 
559  frame = av_mallocz(sizeof(*frame));
560  if (!frame)
561  return AVERROR(ENOMEM);
562  frame->frame = av_frame_alloc();
563  if (!frame->frame) {
564  av_freep(&frame);
565  return AVERROR(ENOMEM);
566  }
567  *last = frame;
568 
569  ret = alloc_frame(avctx, q, frame);
570  if (ret < 0)
571  return ret;
572 
573  *surf = &frame->surface;
574 
575  return 0;
576 }
577 
578 static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
579 {
580  QSVFrame *cur = q->work_frames;
581  while (cur) {
582  if (surf == &cur->surface)
583  return cur;
584  cur = cur->next;
585  }
586  return NULL;
587 }
588 
589 #if QSV_VERSION_ATLEAST(1, 34)
590 static int qsv_export_film_grain(AVCodecContext *avctx, mfxExtAV1FilmGrainParam *ext_param, AVFrame *frame)
591 {
592  AVFilmGrainParams *fgp;
593  AVFilmGrainAOMParams *aom;
594  int i;
595 
596  if (!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_APPLY))
597  return 0;
598 
599  fgp = av_film_grain_params_create_side_data(frame);
600 
601  if (!fgp)
602  return AVERROR(ENOMEM);
603 
604  fgp->type = AV_FILM_GRAIN_PARAMS_AV1;
605  fgp->seed = ext_param->GrainSeed;
606  aom = &fgp->codec.aom;
607 
608  aom->chroma_scaling_from_luma = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_CHROMA_SCALING_FROM_LUMA);
609  aom->scaling_shift = ext_param->GrainScalingMinus8 + 8;
610  aom->ar_coeff_lag = ext_param->ArCoeffLag;
611  aom->ar_coeff_shift = ext_param->ArCoeffShiftMinus6 + 6;
612  aom->grain_scale_shift = ext_param->GrainScaleShift;
613  aom->overlap_flag = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_OVERLAP);
614  aom->limit_output_range = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_CLIP_TO_RESTRICTED_RANGE);
615 
616  aom->num_y_points = ext_param->NumYPoints;
617 
618  for (i = 0; i < aom->num_y_points; i++) {
619  aom->y_points[i][0] = ext_param->PointY[i].Value;
620  aom->y_points[i][1] = ext_param->PointY[i].Scaling;
621  }
622 
623  aom->num_uv_points[0] = ext_param->NumCbPoints;
624 
625  for (i = 0; i < aom->num_uv_points[0]; i++) {
626  aom->uv_points[0][i][0] = ext_param->PointCb[i].Value;
627  aom->uv_points[0][i][1] = ext_param->PointCb[i].Scaling;
628  }
629 
630  aom->num_uv_points[1] = ext_param->NumCrPoints;
631 
632  for (i = 0; i < aom->num_uv_points[1]; i++) {
633  aom->uv_points[1][i][0] = ext_param->PointCr[i].Value;
634  aom->uv_points[1][i][1] = ext_param->PointCr[i].Scaling;
635  }
636 
637  for (i = 0; i < 24; i++)
638  aom->ar_coeffs_y[i] = ext_param->ArCoeffsYPlus128[i] - 128;
639 
640  for (i = 0; i < 25; i++) {
641  aom->ar_coeffs_uv[0][i] = ext_param->ArCoeffsCbPlus128[i] - 128;
642  aom->ar_coeffs_uv[1][i] = ext_param->ArCoeffsCrPlus128[i] - 128;
643  }
644 
645  aom->uv_mult[0] = ext_param->CbMult;
646  aom->uv_mult[1] = ext_param->CrMult;
647  aom->uv_mult_luma[0] = ext_param->CbLumaMult;
648  aom->uv_mult_luma[1] = ext_param->CrLumaMult;
649  aom->uv_offset[0] = ext_param->CbOffset;
650  aom->uv_offset[1] = ext_param->CrOffset;
651 
652  return 0;
653 }
654 #endif
655 
656 #if QSV_VERSION_ATLEAST(1, 35)
657 static int qsv_export_hdr_side_data(AVCodecContext *avctx, mfxExtMasteringDisplayColourVolume *mdcv,
658  mfxExtContentLightLevelInfo *clli, AVFrame *frame)
659 {
660  int ret;
661 
662  // The SDK re-uses this flag for HDR SEI parsing
663  if (mdcv->InsertPayloadToggle) {
664  AVMasteringDisplayMetadata *mastering;
665  const int mapping[3] = {2, 0, 1};
666  const int chroma_den = 50000;
667  const int luma_den = 10000;
668  int i;
669 
670  ret = ff_decode_mastering_display_new(avctx, frame, &mastering);
671  if (ret < 0)
672  return ret;
673 
674  if (mastering) {
675  for (i = 0; i < 3; i++) {
676  const int j = mapping[i];
677  mastering->display_primaries[i][0] = av_make_q(mdcv->DisplayPrimariesX[j], chroma_den);
678  mastering->display_primaries[i][1] = av_make_q(mdcv->DisplayPrimariesY[j], chroma_den);
679  }
680 
681  mastering->white_point[0] = av_make_q(mdcv->WhitePointX, chroma_den);
682  mastering->white_point[1] = av_make_q(mdcv->WhitePointY, chroma_den);
683 
684  mastering->max_luminance = av_make_q(mdcv->MaxDisplayMasteringLuminance, luma_den);
685  mastering->min_luminance = av_make_q(mdcv->MinDisplayMasteringLuminance, luma_den);
686 
687  mastering->has_luminance = 1;
688  mastering->has_primaries = 1;
689  }
690  }
691 
692  // The SDK re-uses this flag for HDR SEI parsing
693  if (clli->InsertPayloadToggle) {
694  AVContentLightMetadata *light;
695 
696  ret = ff_decode_content_light_new(avctx, frame, &light);
697  if (ret < 0)
698  return ret;
699 
700  if (light) {
701  light->MaxCLL = clli->MaxContentLightLevel;
702  light->MaxFALL = clli->MaxPicAverageLightLevel;
703  }
704  }
705 
706  return 0;
707 }
708 
709 #endif
710 
711 static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
712  AVFrame *frame, int *got_frame,
713  const AVPacket *avpkt)
714 {
715  mfxFrameSurface1 *insurf;
716  mfxFrameSurface1 *outsurf;
717  mfxSyncPoint *sync;
718  mfxBitstream bs = { { { 0 } } };
719  int ret;
720 
721  if (avpkt->size) {
722  bs.Data = avpkt->data;
723  bs.DataLength = avpkt->size;
724  bs.MaxLength = bs.DataLength;
725  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
726  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
727  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
728  }
729 
730  sync = av_mallocz(sizeof(*sync));
731  if (!sync) {
732  av_freep(&sync);
733  return AVERROR(ENOMEM);
734  }
735 
736  do {
737  ret = get_surface(avctx, q, &insurf);
738  if (ret < 0) {
739  av_freep(&sync);
740  return ret;
741  }
742 
743  ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
744  insurf, &outsurf, sync);
745  if (ret == MFX_WRN_DEVICE_BUSY)
746  av_usleep(500);
747 
748  } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);
749 
750  if (ret == MFX_ERR_INCOMPATIBLE_VIDEO_PARAM) {
751  q->reinit_flag = 1;
752  av_log(avctx, AV_LOG_DEBUG, "Video parameter change\n");
753  av_freep(&sync);
754  return 0;
755  }
756 
757  if (ret != MFX_ERR_NONE &&
758  ret != MFX_ERR_MORE_DATA &&
759  ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
760  ret != MFX_ERR_MORE_SURFACE) {
761  av_freep(&sync);
762  return ff_qsv_print_error(avctx, ret,
763  "Error during QSV decoding.");
764  }
765 
766  /* make sure we do not enter an infinite loop if the SDK
767  * did not consume any data and did not return anything */
768  if (!*sync && !bs.DataOffset) {
769  bs.DataOffset = avpkt->size;
770  ++q->zero_consume_run;
771  if (q->zero_consume_run > 1 &&
772  (avpkt->size ||
773  ret != MFX_ERR_MORE_DATA))
774  ff_qsv_print_warning(avctx, ret, "A decode call did not consume any data");
775  } else {
776  q->zero_consume_run = 0;
777  }
778 
779  if (*sync) {
780  QSVAsyncFrame aframe;
781  QSVFrame *out_frame = find_frame(q, outsurf);
782 
783  if (!out_frame) {
784  av_log(avctx, AV_LOG_ERROR,
785  "The returned surface does not correspond to any frame\n");
786  av_freep(&sync);
787  return AVERROR_BUG;
788  }
789 
790  out_frame->queued += 1;
791 
792  aframe = (QSVAsyncFrame){ sync, out_frame };
793  av_fifo_write(q->async_fifo, &aframe, 1);
794  } else {
795  av_freep(&sync);
796  }
797 
798  if ((av_fifo_can_read(q->async_fifo) >= q->async_depth) ||
799  (!avpkt->size && av_fifo_can_read(q->async_fifo))) {
800  QSVAsyncFrame aframe;
801  AVFrame *src_frame;
802 
803  av_fifo_read(q->async_fifo, &aframe, 1);
804  aframe.frame->queued -= 1;
805 
806  if (avctx->pix_fmt != AV_PIX_FMT_QSV) {
807  do {
808  ret = MFXVideoCORE_SyncOperation(q->session, *aframe.sync, 1000);
809  } while (ret == MFX_WRN_IN_EXECUTION);
810  }
811 
812  av_freep(&aframe.sync);
813 
814  src_frame = aframe.frame->frame;
815 
816  ret = av_frame_ref(frame, src_frame);
817  if (ret < 0)
818  return ret;
819 
820  outsurf = &aframe.frame->surface;
821 
822  frame->pts = MFX_PTS_TO_PTS(outsurf->Data.TimeStamp, avctx->pkt_timebase);
823 #if QSV_VERSION_ATLEAST(1, 34)
824  if ((avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
825  QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) &&
826  avctx->codec_id == AV_CODEC_ID_AV1) {
827  ret = qsv_export_film_grain(avctx, &aframe.frame->av1_film_grain_param, frame);
828 
829  if (ret < 0)
830  return ret;
831  }
832 #endif
833 
834 #if QSV_VERSION_ATLEAST(1, 35)
835  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 35) && avctx->codec_id == AV_CODEC_ID_HEVC) {
836  ret = qsv_export_hdr_side_data(avctx, &aframe.frame->mdcv, &aframe.frame->clli, frame);
837 
838  if (ret < 0)
839  return ret;
840  }
841 #endif
842 
843  frame->repeat_pict =
844  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
845  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
846  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
847  frame->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST *
848  !!(outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF);
849  frame->flags |= AV_FRAME_FLAG_INTERLACED *
850  !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
851  frame->pict_type = ff_qsv_map_pictype(aframe.frame->dec_info.FrameType);
852  // Treating only IDR frames as key frames is suitable only for H.264; for HEVC, IRAPs are key frames.
853  if (avctx->codec_id == AV_CODEC_ID_H264) {
854  if (aframe.frame->dec_info.FrameType & MFX_FRAMETYPE_IDR)
855  frame->flags |= AV_FRAME_FLAG_KEY;
856  else
857  frame->flags &= ~AV_FRAME_FLAG_KEY;
858  }
859 
860  /* update the surface properties */
861  if (avctx->pix_fmt == AV_PIX_FMT_QSV)
862  ((mfxFrameSurface1*)frame->data[3])->Info = outsurf->Info;
863 
864  *got_frame = 1;
865  }
866 
867  return bs.DataOffset;
868 }
869 
870 static void qsv_decode_close_qsvcontext(QSVContext *q)
871 {
872  QSVFrame *cur = q->work_frames;
873 
874  if (q->session)
875  MFXVideoDECODE_Close(q->session);
876 
877  if (q->async_fifo) {
878  QSVAsyncFrame aframe;
879  while (av_fifo_read(q->async_fifo, &aframe, 1) >= 0)
880  av_freep(&aframe.sync);
881  av_fifo_freep2(&q->async_fifo);
882  }
883 
884  while (cur) {
885  q->work_frames = cur->next;
886  av_frame_free(&cur->frame);
887  av_freep(&cur);
888  cur = q->work_frames;
889  }
890 
891  ff_qsv_close_internal_session(&q->internal_qs);
892 
893  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
894  ff_refstruct_unref(&q->frames_ctx.mids);
895  av_buffer_pool_uninit(&q->pool);
896 }
897 
898 static int qsv_process_data(AVCodecContext *avctx, QSVContext *q,
899  AVFrame *frame, int *got_frame, const AVPacket *pkt)
900 {
901  int ret;
902  mfxVideoParam param = { 0 };
903  enum AVPixelFormat pix_fmt = AV_PIX_FMT_NV12;
904 
905  if (!pkt->size)
906  return qsv_decode(avctx, q, frame, got_frame, pkt);
907 
908  /* TODO: flush delayed frames on reinit */
909 
910  // sw_pix_fmt and coded_width/height must be set for ff_get_format(); assume
911  // sw_pix_fmt is NV12 and coded_width/height is 1280x720. The assumption may
912  // not be correct, but it will be updated once the header has been decoded.
913  if (q->orig_pix_fmt != AV_PIX_FMT_NONE)
914  pix_fmt = q->orig_pix_fmt;
915  if (!avctx->coded_width)
916  avctx->coded_width = 1280;
917  if (!avctx->coded_height)
918  avctx->coded_height = 720;
919 
920  /* decode zero-size pkt to flush the buffered pkt before reinit */
921  if (q->reinit_flag) {
922  AVPacket zero_pkt = {0};
923  ret = qsv_decode(avctx, q, frame, got_frame, &zero_pkt);
924  if (ret < 0 || *got_frame)
925  return ret;
926  }
927 
928  if (q->reinit_flag || !q->session || !q->initialized) {
929  mfxFrameAllocRequest request;
930  memset(&request, 0, sizeof(request));
931 
932  q->reinit_flag = 0;
933  ret = qsv_decode_header(avctx, q, pkt, pix_fmt, &param);
934  if (ret < 0) {
935  if (ret == AVERROR(EAGAIN))
936  av_log(avctx, AV_LOG_VERBOSE, "More data is required to decode header\n");
937  else
938  av_log(avctx, AV_LOG_ERROR, "Error decoding header\n");
939  goto reinit_fail;
940  }
941  param.IOPattern = q->iopattern;
942 
943  q->orig_pix_fmt = avctx->pix_fmt = pix_fmt = ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC);
944 
945  avctx->coded_width = param.mfx.FrameInfo.Width;
946  avctx->coded_height = param.mfx.FrameInfo.Height;
947 
948  ret = MFXVideoDECODE_QueryIOSurf(q->session, &param, &request);
949  if (ret < 0)
950  return ff_qsv_print_error(avctx, ret, "Error querying IO surface");
951 
952  q->suggest_pool_size = request.NumFrameSuggested;
953 
954  ret = qsv_decode_preinit(avctx, q, pix_fmt, &param);
955  if (ret < 0)
956  goto reinit_fail;
957  q->initialized = 0;
958  }
959 
960  if (!q->initialized) {
961  ret = qsv_decode_init_context(avctx, q, &param);
962  if (ret < 0)
963  goto reinit_fail;
964  q->initialized = 1;
965  }
966 
967  return qsv_decode(avctx, q, frame, got_frame, pkt);
968 
969 reinit_fail:
970  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
971  return ret;
972 }
973 
974 enum LoadPlugin {
975  LOAD_PLUGIN_NONE,
976  LOAD_PLUGIN_HEVC_SW,
977  LOAD_PLUGIN_HEVC_HW,
978 };
979 
980 typedef struct QSVDecContext {
981  AVClass *class;
982  QSVContext qsv;
983 
984  int load_plugin;
985 
986  AVFifo *packet_fifo;
987 
988  AVPacket buffer_pkt;
989 } QSVDecContext;
990 
991 static void qsv_clear_buffers(QSVDecContext *s)
992 {
993  AVPacket pkt;
994  while (av_fifo_read(s->packet_fifo, &pkt, 1) >= 0)
995  av_packet_unref(&pkt);
996 
997  av_packet_unref(&s->buffer_pkt);
998 }
999 
1000 static av_cold int qsv_decode_close(AVCodecContext *avctx)
1001 {
1002  QSVDecContext *s = avctx->priv_data;
1003 
1004  qsv_decode_close_qsvcontext(&s->qsv);
1005 
1006  qsv_clear_buffers(s);
1007 
1008  av_fifo_freep2(&s->packet_fifo);
1009 
1010  return 0;
1011 }
1012 
1013 static av_cold int qsv_decode_init(AVCodecContext *avctx)
1014 {
1015  QSVDecContext *s = avctx->priv_data;
1016  int ret;
1017  const char *uid = NULL;
1018 
1019  if (avctx->codec_id == AV_CODEC_ID_VP8) {
1020  uid = "f622394d8d87452f878c51f2fc9b4131";
1021  } else if (avctx->codec_id == AV_CODEC_ID_VP9) {
1022  uid = "a922394d8d87452f878c51f2fc9b4131";
1023  }
1024  else if (avctx->codec_id == AV_CODEC_ID_HEVC && s->load_plugin != LOAD_PLUGIN_NONE) {
1025  static const char * const uid_hevcdec_sw = "15dd936825ad475ea34e35f3f54217a6";
1026  static const char * const uid_hevcdec_hw = "33a61c0b4c27454ca8d85dde757c6f8e";
1027 
1028  if (s->qsv.load_plugins[0]) {
1029  av_log(avctx, AV_LOG_WARNING,
1030  "load_plugins is not empty, but load_plugin is not set to 'none'. "
1031  "The load_plugin value will be ignored.\n");
1032  } else {
1033  if (s->load_plugin == LOAD_PLUGIN_HEVC_SW)
1034  uid = uid_hevcdec_sw;
1035  else
1036  uid = uid_hevcdec_hw;
1037  }
1038  }
1039  if (uid) {
1040  av_freep(&s->qsv.load_plugins);
1041  s->qsv.load_plugins = av_strdup(uid);
1042  if (!s->qsv.load_plugins)
1043  return AVERROR(ENOMEM);
1044  }
1045 
1046  s->qsv.orig_pix_fmt = AV_PIX_FMT_NV12;
1047  s->packet_fifo = av_fifo_alloc2(1, sizeof(AVPacket),
1049  if (!s->packet_fifo) {
1050  ret = AVERROR(ENOMEM);
1051  goto fail;
1052  }
1053 
1054  if (!avctx->pkt_timebase.num)
1055  av_log(avctx, AV_LOG_WARNING, "Invalid pkt_timebase, passing timestamps as-is.\n");
1056 
1057  return 0;
1058 fail:
1059  qsv_decode_close(avctx);
1060  return ret;
1061 }
1062 
1063 static int qsv_decode_frame(AVCodecContext *avctx, AVFrame *frame,
1064  int *got_frame, AVPacket *avpkt)
1065 {
1066  QSVDecContext *s = avctx->priv_data;
1067  int ret;
1068 
1069  /* buffer the input packet */
1070  if (avpkt->size) {
1071  AVPacket input_ref;
1072 
1073  ret = av_packet_ref(&input_ref, avpkt);
1074  if (ret < 0)
1075  return ret;
1076  av_fifo_write(s->packet_fifo, &input_ref, 1);
1077  }
1078 
1079  /* process buffered data */
1080  while (!*got_frame) {
1081  /* prepare the input data */
1082  if (s->buffer_pkt.size <= 0) {
1083  /* no more data */
1084  if (!av_fifo_can_read(s->packet_fifo))
1085  return avpkt->size ? avpkt->size : qsv_process_data(avctx, &s->qsv, frame, got_frame, avpkt);
1086  /* in progress of reinit, no read from fifo and keep the buffer_pkt */
1087  if (!s->qsv.reinit_flag) {
1088  av_packet_unref(&s->buffer_pkt);
1089  av_fifo_read(s->packet_fifo, &s->buffer_pkt, 1);
1090  }
1091  }
1092 
1093  ret = qsv_process_data(avctx, &s->qsv, frame, got_frame, &s->buffer_pkt);
1094  if (ret < 0){
1095  if (ret == AVERROR(EAGAIN))
1096  ret = 0;
1097 
1098  /* Drop buffer_pkt when failed to decode the packet. Otherwise,
1099  the decoder will keep decoding the failure packet. */
1100  av_packet_unref(&s->buffer_pkt);
1101  return ret;
1102  }
1103  if (s->qsv.reinit_flag)
1104  continue;
1105 
1106  s->buffer_pkt.size -= ret;
1107  s->buffer_pkt.data += ret;
1108  }
1109 
1110  return avpkt->size;
1111 }
1112 
1113 static void qsv_decode_flush(AVCodecContext *avctx)
1114 {
1115  QSVDecContext *s = avctx->priv_data;
1116 
1117  qsv_clear_buffers(s);
1118 
1119  s->qsv.orig_pix_fmt = AV_PIX_FMT_NONE;
1120  s->qsv.initialized = 0;
1121 }
1122 
1123 #define OFFSET(x) offsetof(QSVDecContext, x)
1124 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1125 
1126 #define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt) \
1127 static const AVClass x##_qsv_class = { \
1128  .class_name = #x "_qsv", \
1129  .item_name = av_default_item_name, \
1130  .option = opt, \
1131  .version = LIBAVUTIL_VERSION_INT, \
1132 }; \
1133 const FFCodec ff_##x##_qsv_decoder = { \
1134  .p.name = #x "_qsv", \
1135  CODEC_LONG_NAME(#X " video (Intel Quick Sync Video acceleration)"), \
1136  .priv_data_size = sizeof(QSVDecContext), \
1137  .p.type = AVMEDIA_TYPE_VIDEO, \
1138  .p.id = AV_CODEC_ID_##X, \
1139  .init = qsv_decode_init, \
1140  FF_CODEC_DECODE_CB(qsv_decode_frame), \
1141  .flush = qsv_decode_flush, \
1142  .close = qsv_decode_close, \
1143  .bsfs = bsf_name, \
1144  .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HYBRID, \
1145  .p.priv_class = &x##_qsv_class, \
1146  .hw_configs = qsv_hw_configs, \
1147  .p.wrapper_name = "qsv", \
1148  .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE, \
1149 }; \
1150 
1151 #define DEFINE_QSV_DECODER(x, X, bsf_name) DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, options)
1152 
1153 #if CONFIG_HEVC_QSV_DECODER
1154 static const AVOption hevc_options[] = {
1155  { "async_depth", "Internal parallelization depth; the higher the value, the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
1156 
1157  { "load_plugin", "A user plugin to load in an internal session", OFFSET(load_plugin), AV_OPT_TYPE_INT, { .i64 = LOAD_PLUGIN_HEVC_HW }, LOAD_PLUGIN_NONE, LOAD_PLUGIN_HEVC_HW, VD, .unit = "load_plugin" },
1158  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_NONE }, 0, 0, VD, .unit = "load_plugin" },
1159  { "hevc_sw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_SW }, 0, 0, VD, .unit = "load_plugin" },
1160  { "hevc_hw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_HW }, 0, 0, VD, .unit = "load_plugin" },
1161 
1162  { "load_plugins", "A :-separated list of hexadecimal plugin UIDs to load in an internal session",
1163  OFFSET(qsv.load_plugins), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VD },
1164 
1165  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, .unit = "gpu_copy"},
1166  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, .unit = "gpu_copy"},
1167  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, .unit = "gpu_copy"},
1168  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, .unit = "gpu_copy"},
1169  { NULL },
1170 };
1171 DEFINE_QSV_DECODER_WITH_OPTION(hevc, HEVC, "hevc_mp4toannexb", hevc_options)
1172 #endif
1173 
1174 static const AVOption options[] = {
1175  { "async_depth", "Internal parallelization depth; the higher the value, the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
1176 
1177  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, .unit = "gpu_copy"},
1178  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, .unit = "gpu_copy"},
1179  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, .unit = "gpu_copy"},
1180  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, .unit = "gpu_copy"},
1181  { NULL },
1182 };
1183 
1184 #if CONFIG_H264_QSV_DECODER
1185 DEFINE_QSV_DECODER(h264, H264, "h264_mp4toannexb")
1186 #endif
1187 
1188 #if CONFIG_MPEG2_QSV_DECODER
1189 DEFINE_QSV_DECODER(mpeg2, MPEG2VIDEO, NULL)
1190 #endif
1191 
1192 #if CONFIG_VC1_QSV_DECODER
1193 DEFINE_QSV_DECODER(vc1, VC1, NULL)
1194 #endif
1195 
1196 #if CONFIG_MJPEG_QSV_DECODER
1197 DEFINE_QSV_DECODER(mjpeg, MJPEG, NULL)
1198 #endif
1199 
1200 #if CONFIG_VP8_QSV_DECODER
1201 DEFINE_QSV_DECODER(vp8, VP8, NULL)
1202 #endif
1203 
1204 #if CONFIG_VP9_QSV_DECODER
1205 DEFINE_QSV_DECODER(vp9, VP9, NULL)
1206 #endif
1207 
1208 #if CONFIG_AV1_QSV_DECODER
1209 DEFINE_QSV_DECODER(av1, AV1, NULL)
1210 #endif
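Usage note (editorial addition, not part of qsvdec.c): when enabled at configure
time, the decoders defined by the DEFINE_QSV_DECODER() macros above are registered
under the names "h264_qsv", "hevc_qsv", "mpeg2_qsv", "vc1_qsv", "mjpeg_qsv",
"vp8_qsv", "vp9_qsv" and "av1_qsv". The sketch below shows one plausible way for an
application to open one of them through the public libavcodec/libavutil API; the
helper name open_h264_qsv() is hypothetical, error reporting is minimal, and
passing NULL as the device string simply asks for the default QSV device.

#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>

/* Hypothetical helper: find the h264_qsv wrapper, attach a QSV hardware device
 * and open the decoder. qsv_decode_preinit() above derives a frame pool from
 * hw_device_ctx when the caller does not supply hw_frames_ctx. */
static AVCodecContext *open_h264_qsv(void)
{
    const AVCodec *dec = avcodec_find_decoder_by_name("h264_qsv");
    AVCodecContext *ctx = dec ? avcodec_alloc_context3(dec) : NULL;

    if (!ctx)
        return NULL;

    if (av_hwdevice_ctx_create(&ctx->hw_device_ctx, AV_HWDEVICE_TYPE_QSV,
                               NULL, NULL, 0) < 0 ||
        avcodec_open2(ctx, dec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;
}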
hwconfig.h
AVMasteringDisplayMetadata::has_primaries
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
Definition: mastering_display_metadata.h:62
AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:302
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:427
AVQSVFramesContext::frame_type
int frame_type
A combination of MFX_MEMTYPE_* describing the frame pool.
Definition: hwcontext_qsv.h:60
AVCodecContext::hwaccel_context
void * hwaccel_context
Legacy hardware accelerator context.
Definition: avcodec.h:1451
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVMasteringDisplayMetadata::max_luminance
AVRational max_luminance
Max luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:57
LOAD_PLUGIN_HEVC_HW
@ LOAD_PLUGIN_HEVC_HW
Definition: qsvdec.c:977
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
uid
UID uid
Definition: mxfenc.c:2422
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:685
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1222
QSVFramesContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: qsv_internal.h:115
AVBufferPool
The buffer pool.
Definition: buffer_internal.h:88
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: defs.h:200
options
static const AVOption options[]
Definition: qsvdec.c:1174
AVMasteringDisplayMetadata::display_primaries
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
Definition: mastering_display_metadata.h:42
AVHWFramesContext::format
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:197
AVMasteringDisplayMetadata::has_luminance
int has_luminance
Flag indicating whether the luminance (min_ and max_) have been set.
Definition: mastering_display_metadata.h:67
LOAD_PLUGIN_NONE
@ LOAD_PLUGIN_NONE
Definition: qsvdec.c:975
AVFilmGrainAOMParams::uv_points
uint8_t uv_points[2][10][2]
Definition: film_grain_params.h:63
OFFSET
#define OFFSET(x)
Definition: qsvdec.c:1123
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
AVContentLightMetadata::MaxCLL
unsigned MaxCLL
Max content light level (cd/m^2).
Definition: mastering_display_metadata.h:111
AVFilmGrainParams::aom
AVFilmGrainAOMParams aom
Definition: film_grain_params.h:260
av_hwframe_ctx_init
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:322
ff_qsv_close_internal_session
int ff_qsv_close_internal_session(QSVSession *qs)
Definition: qsv.c:1111
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
ff_qsv_map_pictype
enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type)
Definition: qsv.c:375
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:678
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:686
av_hwframe_ctx_alloc
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:248
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:524
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:708
AVOption
AVOption.
Definition: opt.h:346
qsv_decode_close_qsvcontext
static void qsv_decode_close_qsvcontext(QSVContext *q)
Definition: qsvdec.c:870
ff_qsv_find_surface_idx
int ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame)
Definition: qsv.c:344
AV_PIX_FMT_XV30
#define AV_PIX_FMT_XV30
Definition: pixfmt.h:534
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
QSVContext::work_frames
QSVFrame * work_frames
a linked list of frames currently being used by QSV
Definition: qsvdec.c:90
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
AVHWFramesContext::width
int width
The allocated dimensions of the frames in this pool.
Definition: hwcontext.h:217
LOAD_PLUGIN_HEVC_SW
@ LOAD_PLUGIN_HEVC_SW
Definition: qsvdec.c:976
qsv_decode_init_context
static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
Definition: qsvdec.c:362
QSVFrame::frame
AVFrame * frame
Definition: qsv_internal.h:80
AVQSVContext::iopattern
int iopattern
The IO pattern to use.
Definition: qsv.h:46
QSVFrame::used
int used
Definition: qsv_internal.h:100
AVFilmGrainParams::seed
uint64_t seed
Seed to use for the synthesis process, if the codec allows for it.
Definition: film_grain_params.h:250
ff_qsv_init_session_device
int ff_qsv_init_session_device(AVCodecContext *avctx, mfxSession *psession, AVBufferRef *device_ref, const char *load_plugins, int gpu_copy)
Definition: qsv.c:988
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:638
AVContentLightMetadata
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
Definition: mastering_display_metadata.h:107
ff_qsv_map_frame_to_surface
int ff_qsv_map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
Definition: qsv.c:283
fifo.h
qsv_decode_flush
static void qsv_decode_flush(AVCodecContext *avctx)
Definition: qsvdec.c:1113
AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
The codec supports this format via the hw_frames_ctx interface.
Definition: codec.h:315
QSVContext::suggest_pool_size
int suggest_pool_size
Definition: qsvdec.c:100
fail
#define fail()
Definition: checkasm.h:179
qsv_decode
static int qsv_decode(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, const AVPacket *avpkt)
Definition: qsvdec.c:711
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
AVFilmGrainAOMParams::grain_scale_shift
int grain_scale_shift
Signals the down shift applied to the generated gaussian numbers during synthesis.
Definition: film_grain_params.h:99
QSVDecContext::qsv
QSVContext qsv
Definition: qsvdec.c:982
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:633
AVFilmGrainAOMParams::limit_output_range
int limit_output_range
Signals to clip to limited color levels after film grain application.
Definition: film_grain_params.h:122
AVFilmGrainAOMParams::num_y_points
int num_y_points
Number of points, and the scale and value for each point of the piecewise linear scaling function for...
Definition: film_grain_params.h:49
AVRational::num
int num
Numerator.
Definition: rational.h:59
QSVDecContext::packet_fifo
AVFifo * packet_fifo
Definition: qsvdec.c:986
QSVContext::async_fifo
AVFifo * async_fifo
Definition: qsvdec.c:92
refstruct.h
QSVContext
Definition: qsvdec.c:76
qsv_internal.h
AVFilmGrainAOMParams
This structure describes how to handle film grain synthesis for AOM codecs.
Definition: film_grain_params.h:44
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:148
get_surface
static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
Definition: qsvdec.c:537
AV_PIX_FMT_Y210
#define AV_PIX_FMT_Y210
Definition: pixfmt.h:532
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:671
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
ff_qsv_print_warning
int ff_qsv_print_warning(void *log_ctx, mfxStatus err, const char *warning_string)
Definition: qsv.c:194
ASYNC_DEPTH_DEFAULT
#define ASYNC_DEPTH_DEFAULT
Definition: qsv_internal.h:50
film_grain_params.h
av_cold
#define av_cold
Definition: attributes.h:90
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
AVHWFramesContext::height
int height
Definition: hwcontext.h:217
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
QSVDecContext
Definition: qsvdec.c:980
QSVContext::iopattern
int iopattern
Definition: qsvdec.c:105
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:384
AVMasteringDisplayMetadata::white_point
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
Definition: mastering_display_metadata.h:47
s
#define s(width, name)
Definition: cbs_vp9.c:198
hevc_options
static const AVOption hevc_options[]
Definition: videotoolboxenc.c:2932
QSVContext::reinit_flag
int reinit_flag
Definition: qsvdec.c:94
QSVContext::frames_ctx
QSVFramesContext frames_ctx
Definition: qsvdec.c:85
QSVContext::internal_qs
QSVSession internal_qs
Definition: qsvdec.c:83
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:425
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
mfx_tb
static const AVRational mfx_tb
Definition: qsvdec.c:61
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:220
QSVContext::ver
mfxVersion ver
Definition: qsvdec.c:79
qsv_process_data
static int qsv_process_data(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: qsvdec.c:898
QSV_RUNTIME_VERSION_ATLEAST
#define QSV_RUNTIME_VERSION_ATLEAST(MFX_VERSION, MAJOR, MINOR)
Definition: qsv_internal.h:63
av_film_grain_params_create_side_data
AVFilmGrainParams * av_film_grain_params_create_side_data(AVFrame *frame)
Allocate a complete AVFilmGrainParams and add it to the frame.
Definition: film_grain_params.c:33
MFX_PTS_TO_PTS
#define MFX_PTS_TO_PTS(mfx_pts, pts_tb)
Definition: qsvdec.c:67
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
decode.h
AVCodecHWConfig::pix_fmt
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:343
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
AVQSVContext::nb_ext_buffers
int nb_ext_buffers
Definition: qsv.h:52
ff_decode_mastering_display_new
int ff_decode_mastering_display_new(const AVCodecContext *avctx, AVFrame *frame, AVMasteringDisplayMetadata **mdm)
Wrapper around av_mastering_display_metadata_create_side_data(), which rejects side data overridden b...
Definition: decode.c:2012
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
if
if(ret)
Definition: filter_design.txt:179
ff_qsv_init_session_frames
int ff_qsv_init_session_frames(AVCodecContext *avctx, mfxSession *psession, QSVFramesContext *qsv_frames_ctx, const char *load_plugins, int opaque, int gpu_copy)
Definition: qsv.c:1065
QSVFrame
Definition: qsv_internal.h:79
AVFilmGrainAOMParams::uv_mult_luma
int uv_mult_luma[2]
Definition: film_grain_params.h:106
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
AVHWFramesContext::sw_format
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:210
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:695
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_CODEC_ID_AV1
@ AV_CODEC_ID_AV1
Definition: codec_id.h:280
qsv.h
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:74
QSV_HAVE_OPAQUE
#define QSV_HAVE_OPAQUE
Definition: qsv_internal.h:68
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
qsv_decode_preinit
static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
Definition: qsvdec.c:266
ff_qsv_print_iopattern
int ff_qsv_print_iopattern(void *log_ctx, int mfx_iopattern, const char *extra_string)
Definition: qsv.c:100
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
av_fifo_can_read
size_t av_fifo_can_read(const AVFifo *f)
Definition: fifo.c:87
qsv_get_continuous_buffer
static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame, AVBufferPool *pool)
Definition: qsvdec.c:127
QSVContext::nb_ext_buffers
int nb_ext_buffers
Definition: qsvdec.c:111
QSVFrame::surface
mfxFrameSurface1 surface
Definition: qsv_internal.h:81
time.h
alloc_frame
static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
Definition: qsvdec.c:457
AV_PIX_FMT_QSV
@ AV_PIX_FMT_QSV
HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure.
Definition: pixfmt.h:247
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: packet.c:435
QSVContext::load_plugins
char * load_plugins
Definition: qsvdec.c:108
AVCodecContext::level
int level
Encoding level descriptor.
Definition: avcodec.h:1783
QSVContext::initialized
int initialized
Definition: qsvdec.c:101
QSVContext::fourcc
uint32_t fourcc
Definition: qsvdec.c:97
QSVContext::ext_buffers
mfxExtBuffer ** ext_buffers
Definition: qsvdec.c:110
AVFilmGrainAOMParams::num_uv_points
int num_uv_points[2]
If chroma_scaling_from_luma is set to 0, signals the chroma scaling function parameters.
Definition: film_grain_params.h:62
QSVContext::frame_info
mfxFrameInfo frame_info
Definition: qsvdec.c:98
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1556
AVPacket::size
int size
Definition: packet.h:525
AVFifo
Definition: fifo.c:35
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:384
AVCodecContext::extra_hw_frames
int extra_hw_frames
Video decoding only.
Definition: avcodec.h:1520
DEFINE_QSV_DECODER_WITH_OPTION
#define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt)
Definition: qsvdec.c:1126
codec_internal.h
AV_PIX_FMT_P012
#define AV_PIX_FMT_P012
Definition: pixfmt.h:529
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
AVQSVContext::session
mfxSession session
If non-NULL, the session to use for encoding or decoding.
Definition: qsv.h:41
qsv_init_session
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session, AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
Definition: qsvdec.c:182
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
Definition: avcodec.h:551
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
qsv_decode_close
static av_cold int qsv_decode_close(AVCodecContext *avctx)
Definition: qsvdec.c:1000
AVFilmGrainParams
This structure describes how to handle film grain synthesis in video for specific codecs.
Definition: film_grain_params.h:238
qsv_clear_unused_frames
static void qsv_clear_unused_frames(QSVContext *q)
Definition: qsvdec.c:525
AVCodecHWConfigInternal
Definition: hwconfig.h:25
frame.h
AV_PIX_FMT_Y212
#define AV_PIX_FMT_Y212
Definition: pixfmt.h:533
AVQSVContext::ext_buffers
mfxExtBuffer ** ext_buffers
Extra buffers to pass to encoder or decoder initialization.
Definition: qsv.h:51
av_image_get_buffer_size
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
Definition: imgutils.c:466
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
AVFilmGrainAOMParams::ar_coeffs_y
int8_t ar_coeffs_y[24]
Luma auto-regression coefficients.
Definition: film_grain_params.h:80
QSVFramesContext::mids
QSVMid * mids
The memory ids for the external frames.
Definition: qsv_internal.h:124
QSVAsyncFrame::frame
QSVFrame * frame
Definition: qsvdec.c:73
hwcontext_qsv.h
MFXUnload
#define MFXUnload(a)
Definition: qsvdec.c:58
qsv_decode_header
static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q, const AVPacket *avpkt, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
Definition: qsvdec.c:388
QSVContext::pool
AVBufferPool * pool
Definition: qsvdec.c:99
log.h
ff_qsv_map_picstruct
enum AVFieldOrder ff_qsv_map_picstruct(int mfx_pic_struct)
Definition: qsv.c:357
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:517
qsv_hw_configs
static const AVCodecHWConfigInternal *const qsv_hw_configs[]
Definition: qsvdec.c:114
QSVDecContext::buffer_pkt
AVPacket buffer_pkt
Definition: qsvdec.c:988
common.h
QSVContext::session
mfxSession session
Definition: qsvdec.c:78
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:226
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:606
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AVFilmGrainAOMParams::scaling_shift
int scaling_shift
Specifies the shift applied to the chroma components.
Definition: film_grain_params.h:69
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:1497
AVMasteringDisplayMetadata
Mastering display metadata capable of representing the color volume of the display used to master the...
Definition: mastering_display_metadata.h:38
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:633
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:669
QSVDecContext::load_plugin
int load_plugin
Definition: qsvdec.c:984
AVCodecContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
Definition: avcodec.h:1475
avcodec.h
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:115
find_frame
static QSVFrame * find_frame(QSVContext *q, mfxFrameSurface1 *surf)
Definition: qsvdec.c:578
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
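An illustrative sketch (example_buffer is a made-up name) of allocating a zeroed, reference-counted buffer and dropping the reference:

#include <libavutil/buffer.h>
#include <libavutil/error.h>

static int example_buffer(void)
{
    AVBufferRef *buf = av_buffer_allocz(4096);
    if (!buf)
        return AVERROR(ENOMEM);
    /* buf->data now points to 4096 zeroed bytes */
    av_buffer_unref(&buf);   /* drop the reference; memory is freed once the refcount hits 0 */
    return 0;
}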
ret
ret
Definition: filter_design.txt:187
pixfmt.h
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
Definition: pixfmt.h:96
frame
Excerpt from the libavfilter frame-handling design notes: buffered frames must be flushed as soon as new input produces new frames; a filter must process or queue an incoming frame rather than call request_frame for more, leaving the task of requesting frames to its request_frame method or the application; filters with several inputs must be ready for frames arriving in any order and usually need some queuing, and dropping frames from a limited queue is acceptable when the inputs are too unbalanced; request_frame should push an already-queued frame or keep requesting on its inputs until at least one frame has been pushed, or at least make progress towards producing a frame.
Definition: filter_design.txt:264
QSVFrame::queued
int queued
Definition: qsv_internal.h:99
QSVContext::async_depth
int async_depth
Definition: qsvdec.c:104
QSVSession
Definition: qsv_internal.h:105
AVHWFramesContext::hwctx
void * hwctx
The format-specific data, allocated and freed automatically along with this context.
Definition: hwcontext.h:150
ff_qsv_codec_id_to_mfx
int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id)
Definition: qsv.c:54
LoadPlugin
LoadPlugin
Definition: qsvdec.c:974
ff_decode_content_light_new
int ff_decode_content_light_new(const AVCodecContext *avctx, AVFrame *frame, AVContentLightMetadata **clm)
Wrapper around av_content_light_metadata_create_side_data(), which rejects side data overridden by th...
Definition: decode.c:2057
QSVContext::zero_consume_run
int zero_consume_run
Definition: qsvdec.c:93
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
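A hedged sketch of the auto-growing FIFO API (example_fifo is a made-up name; int64_t elements are used purely to keep the snippet self-contained, whereas qsvdec.c stores QSVAsyncFrame elements the same way):

#include <stdint.h>
#include <libavutil/error.h>
#include <libavutil/fifo.h>

static int example_fifo(void)
{
    AVFifo *fifo = av_fifo_alloc2(8, sizeof(int64_t), AV_FIFO_FLAG_AUTO_GROW);
    int64_t v = 42;

    if (!fifo)
        return AVERROR(ENOMEM);

    av_fifo_write(fifo, &v, 1);        /* grows automatically when full */
    while (av_fifo_can_read(fifo))
        av_fifo_read(fifo, &v, 1);     /* oldest element first */

    av_fifo_freep2(&fifo);             /* free the FIFO and reset the pointer to NULL */
    return 0;
}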
AV_HWDEVICE_TYPE_QSV
@ AV_HWDEVICE_TYPE_QSV
Definition: hwcontext.h:33
ff_decode_frame_props
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1448
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVFilmGrainAOMParams::ar_coeff_lag
int ar_coeff_lag
Specifies the auto-regression lag.
Definition: film_grain_params.h:74
QSVContext::orig_pix_fmt
enum AVPixelFormat orig_pix_fmt
Definition: qsvdec.c:96
AVFilmGrainAOMParams::y_points
uint8_t y_points[14][2]
Definition: film_grain_params.h:50
AVFilmGrainAOMParams::uv_offset
int uv_offset[2]
Offset used for component scaling function.
Definition: film_grain_params.h:112
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1639
AVQSVContext
This struct is used for communicating QSV parameters between libavcodec and the caller.
Definition: qsv.h:36
QSVSession::session
mfxSession session
Definition: qsv_internal.h:106
ff_qsv_map_fourcc
enum AVPixelFormat ff_qsv_map_fourcc(uint32_t fourcc)
Definition: qsv.c:203
AVFilmGrainAOMParams::uv_mult
int uv_mult[2]
Specifies the luma/chroma multipliers for the index to the component scaling function.
Definition: film_grain_params.h:105
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame, packet, or coded stream side data by decoders and encoders.
Definition: avcodec.h:1926
qsv_decode_frame
static int qsv_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: qsvdec.c:1063
AVMasteringDisplayMetadata::min_luminance
AVRational min_luminance
Min luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:52
AV_PIX_FMT_P010
#define AV_PIX_FMT_P010
Definition: pixfmt.h:528
AVFilmGrainAOMParams::overlap_flag
int overlap_flag
Signals whether to overlap film grain blocks.
Definition: film_grain_params.h:117
AVQSVFramesContext
This struct is allocated as AVHWFramesContext.hwctx.
Definition: hwcontext_qsv.h:53
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:633
AVHWFramesContext::initial_pool_size
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:187
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
qsv_decode_init
static av_cold int qsv_decode_init(AVCodecContext *avctx)
Definition: qsvdec.c:1013
QSVFrame::dec_info
mfxExtDecodedFrameInfo dec_info
Definition: qsv_internal.h:83
mastering_display_metadata.h
ff_attach_decode_data
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1532
qsv_clear_buffers
static void qsv_clear_buffers(QSVDecContext *s)
Definition: qsvdec.c:991
DEFINE_QSV_DECODER
#define DEFINE_QSV_DECODER(x, X, bsf_name)
Definition: qsvdec.c:1151
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
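FFALIGN(x, a) rounds x up to the next multiple of a power-of-two alignment; a tiny illustration:

#include <libavutil/macros.h>

/* FFALIGN rounds up to the next multiple of a power-of-two alignment:
 * FFALIGN(1918, 128) == 1920, FFALIGN(1920, 128) == 1920. */
int linesize = FFALIGN(1918, 128);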
QSVAsyncFrame::sync
mfxSyncPoint * sync
Definition: qsvdec.c:72
QSVFramesContext
Definition: qsv_internal.h:114
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AVContentLightMetadata::MaxFALL
unsigned MaxFALL
Max average light level per frame (cd/m^2).
Definition: mastering_display_metadata.h:116
AVPacket
This structure stores compressed data.
Definition: packet.h:501
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
imgutils.h
PTS_TO_MFX_PTS
#define PTS_TO_MFX_PTS(pts, pts_tb)
Definition: qsvdec.c:63
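A hedged sketch of how such a macro is typically applied when filling the MediaSDK bitstream from a packet; avpkt and avctx are assumed to be the decoder's current packet and codec context, and the exact call site in qsvdec.c may differ:

/* Illustrative only: hand the compressed packet to MediaSDK with a
 * timestamp rescaled from avctx->pkt_timebase into the 90 kHz mfx timebase. */
mfxBitstream bs = { 0 };

bs.Data       = avpkt->data;
bs.DataLength = avpkt->size;
bs.MaxLength  = avpkt->size;
bs.TimeStamp  = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);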
AV_PIX_FMT_XV36
#define AV_PIX_FMT_XV36
Definition: pixfmt.h:535
AV_CODEC_ID_VP8
@ AV_CODEC_ID_VP8
Definition: codec_id.h:192
hwcontext.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
VD
#define VD
Definition: qsvdec.c:1124
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
QSVContext::gpu_copy
int gpu_copy
Definition: qsvdec.c:106
AVCodecContext::sw_pix_fmt
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:664
QSVAsyncFrame
Definition: qsvdec.c:71
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:239
AVFilmGrainAOMParams::chroma_scaling_from_luma
int chroma_scaling_from_luma
Signals whether to derive the chroma scaling function from the luma.
Definition: film_grain_params.h:56
AV_PIX_FMT_VUYX
@ AV_PIX_FMT_VUYX
packed VUYX 4:4:4, 32bpp, Variant of VUYA where alpha channel is left undefined
Definition: pixfmt.h:406
QSVSession::loader
void * loader
Definition: qsv_internal.h:111
ff_qsv_frame_add_ext_param
void ff_qsv_frame_add_ext_param(AVCodecContext *avctx, QSVFrame *frame, mfxExtBuffer *param)
Definition: qsv.c:1129
AVCodecHWConfigInternal::public
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
Definition: hwconfig.h:30
AV_FILM_GRAIN_PARAMS_AV1
@ AV_FILM_GRAIN_PARAMS_AV1
The union is valid when interpreted as AVFilmGrainAOMParams (codec.aom)
Definition: film_grain_params.h:30
QSVFrame::next
struct QSVFrame * next
Definition: qsv_internal.h:102
ff_qsv_print_error
int ff_qsv_print_error(void *log_ctx, mfxStatus err, const char *error_string)
Definition: qsv.c:185
AVFilmGrainParams::type
enum AVFilmGrainParamsType type
Specifies the codec for which this structure is valid.
Definition: film_grain_params.h:242
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:244
ff_qsv_init_internal_session
int ff_qsv_init_internal_session(AVCodecContext *avctx, QSVSession *qs, const char *load_plugins, int gpu_copy)
Definition: qsv.c:677
ff_refstruct_unref
void ff_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are no more references to it.
Definition: refstruct.c:120
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:420
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
AVFilmGrainParams::codec
union AVFilmGrainParams::@386 codec
Additional fields may be added both here and in any structure included.
AVFilmGrainAOMParams::ar_coeff_shift
int ar_coeff_shift
Specifies the range of the auto-regressive coefficients.
Definition: film_grain_params.h:93
AVFilmGrainAOMParams::ar_coeffs_uv
int8_t ar_coeffs_uv[2][25]
Chroma auto-regression coefficients.
Definition: film_grain_params.h:86