FFmpeg
mjpegdec.c
Go to the documentation of this file.
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/attributes.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/mem.h"
39 #include "libavutil/opt.h"
40 #include "avcodec.h"
41 #include "blockdsp.h"
42 #include "codec_internal.h"
43 #include "copy_block.h"
44 #include "decode.h"
45 #include "exif.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "jpegtables.h"
51 #include "mjpeg.h"
52 #include "mjpegdec.h"
53 #include "jpeglsdec.h"
54 #include "profiles.h"
55 #include "put_bits.h"
56 
57 
59  const uint8_t **pbuf_ptr, size_t *pbuf_size);
60 
62 {
63  static const struct {
64  int class;
65  int index;
66  const uint8_t *bits;
67  const uint8_t *values;
68  int length;
69  } ht[] = {
71  ff_mjpeg_val_dc, 12 },
73  ff_mjpeg_val_dc, 12 },
82  };
83  int i, ret;
84 
85  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
86  ff_vlc_free(&s->vlcs[ht[i].class][ht[i].index]);
87  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
88  ht[i].bits, ht[i].values,
89  ht[i].class == 1, s->avctx);
90  if (ret < 0)
91  return ret;
92 
93  if (ht[i].class < 2) {
94  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
95  ht[i].bits + 1, 16);
96  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
97  ht[i].values, ht[i].length);
98  }
99  }
100 
101  return 0;
102 }
103 
104 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
105 {
106  if (len > 12 && buf[12] == 1) /* 1 - NTSC */
107  s->interlace_polarity = 1;
108  if (len > 12 && buf[12] == 2) /* 2 - PAL */
109  s->interlace_polarity = 0;
110  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
111  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 12 ? buf[12] : -1);
112 }
113 
/* (Re)initialize the IDCT for this codec context and rebuild the
 * scantable permuted into the chosen IDCT's coefficient order.
 * Called at decoder init and again when the stream's bits-per-sample
 * changes (see the SOF parser). */
static void init_idct(AVCodecContext *avctx)
{
    MJpegDecodeContext *s = avctx->priv_data;

    ff_idctdsp_init(&s->idsp, avctx);
    /* Map the standard zigzag scan through the IDCT's input permutation. */
    ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
                         s->idsp.idct_permutation);
}
122 
124 {
125  MJpegDecodeContext *s = avctx->priv_data;
126  int ret;
127 
128  if (!s->picture_ptr) {
129  s->picture = av_frame_alloc();
130  if (!s->picture)
131  return AVERROR(ENOMEM);
132  s->picture_ptr = s->picture;
133  }
134 
135  s->avctx = avctx;
136  ff_blockdsp_init(&s->bdsp);
137  init_idct(avctx);
138  s->buffer_size = 0;
139  s->buffer = NULL;
140  s->first_picture = 1;
141  s->got_picture = 0;
142  s->orig_height = avctx->coded_height;
144  avctx->colorspace = AVCOL_SPC_BT470BG;
145  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
146 
147  if ((ret = init_default_huffman_tables(s)) < 0)
148  return ret;
149 
150  if (s->extern_huff && avctx->extradata) {
151  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
152  bytestream2_init(&s->gB, avctx->extradata, avctx->extradata_size);
153  if (ff_mjpeg_decode_dht(s)) {
154  av_log(avctx, AV_LOG_ERROR,
155  "error using external huffman table, switching back to internal\n");
156  if ((ret = init_default_huffman_tables(s)) < 0)
157  return ret;
158  }
159  }
160  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
161  s->interlace_polarity = 1; /* bottom field first */
162  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
163  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
164  if (avctx->codec_tag == AV_RL32("MJPG"))
165  s->interlace_polarity = 1;
166  }
167 
168  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
169  if (avctx->extradata_size >= 4)
170  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
171 
172  if (s->smv_frames_per_jpeg <= 0) {
173  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
174  return AVERROR_INVALIDDATA;
175  }
176 
177  s->smv_frame = av_frame_alloc();
178  if (!s->smv_frame)
179  return AVERROR(ENOMEM);
180  } else if (avctx->extradata_size > 8
181  && AV_RL32(avctx->extradata) == 0x2C
182  && AV_RL32(avctx->extradata + 4) == 0x18) {
183  parse_avid(s, avctx->extradata, avctx->extradata_size);
184  }
185 
186  if (avctx->codec->id == AV_CODEC_ID_AMV)
187  s->flipped = 1;
188 
189  return 0;
190 }
191 
192 
193 static int mjpeg_parse_len(MJpegDecodeContext *s, int *plen, const char *name)
194 {
195  int len = bytestream2_get_be16u(&s->gB);
196  if (len < 2 || bytestream2_get_bytes_left(&s->gB) < (len - 2)) {
197  av_log(s->avctx, AV_LOG_ERROR, "%s: invalid len %d\n", name, len);
198  return AVERROR_INVALIDDATA;
199  }
200  *plen = len - 2;
201  return 0;
202 }
203 
204 /* quantize tables */
206 {
207  int len, index, i;
208 
209  int ret = mjpeg_parse_len(s, &len, "dqt");
210  if (ret < 0)
211  return ret;
212 
213  while (len >= 65) {
214  uint8_t b = bytestream2_get_byteu(&s->gB);
215  int pr = b >> 4;
216  if (pr > 1) {
217  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
218  return AVERROR_INVALIDDATA;
219  }
220  if (len < (1 + 64 * (1 + pr)))
221  return AVERROR_INVALIDDATA;
222  index = b & 0x0F;
223  if (index >= 4)
224  return AVERROR_INVALIDDATA;
225  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
226  /* read quant table */
227  for (i = 0; i < 64; i++) {
228  s->quant_matrixes[index][i] = pr ? bytestream2_get_be16u(&s->gB) : bytestream2_get_byteu(&s->gB);
229  if (s->quant_matrixes[index][i] == 0) {
230  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
231  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
232  if (s->avctx->err_recognition & AV_EF_EXPLODE)
233  return AVERROR_INVALIDDATA;
234  }
235  }
236 
237  // XXX FIXME fine-tune, and perhaps add dc too
238  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
239  s->quant_matrixes[index][8]) >> 1;
240  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
241  index, s->qscale[index]);
242  len -= 1 + 64 * (1 + pr);
243  }
244  return 0;
245 }
246 
247 /* decode huffman tables and build VLC decoders */
249 {
250  int len, index, i, class, n, v;
251  uint8_t bits_table[17];
252  uint8_t val_table[256];
253  int ret = 0;
254 
255  ret = mjpeg_parse_len(s, &len, "dht");
256  if (ret < 0)
257  return ret;
258 
259  while (len > 0) {
260  if (len < 17)
261  return AVERROR_INVALIDDATA;
262  uint8_t b = bytestream2_get_byteu(&s->gB);
263  class = b >> 4;
264  if (class >= 2)
265  return AVERROR_INVALIDDATA;
266  index = b & 0x0F;
267  if (index >= 4)
268  return AVERROR_INVALIDDATA;
269  n = 0;
270  for (i = 1; i <= 16; i++) {
271  bits_table[i] = bytestream2_get_byteu(&s->gB);
272  n += bits_table[i];
273  }
274  len -= 17;
275  if (len < n || n > 256)
276  return AVERROR_INVALIDDATA;
277 
278  for (i = 0; i < n; i++) {
279  v = bytestream2_get_byteu(&s->gB);
280  val_table[i] = v;
281  }
282  len -= n;
283 
284  /* build VLC and flush previous vlc if present */
285  ff_vlc_free(&s->vlcs[class][index]);
286  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
287  class, index, n);
288  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
289  val_table, class > 0, s->avctx)) < 0)
290  return ret;
291 
292  if (class > 0) {
293  ff_vlc_free(&s->vlcs[2][index]);
294  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
295  val_table, 0, s->avctx)) < 0)
296  return ret;
297  }
298 
299  for (i = 0; i < 16; i++)
300  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
301  for (i = 0; i < 256; i++)
302  s->raw_huffman_values[class][index][i] = val_table[i];
303  }
304  return 0;
305 }
306 
308 {
309  int len, nb_components, i, width, height, bits, ret, size_change;
310  unsigned pix_fmt_id;
311  int h_count[MAX_COMPONENTS] = { 0 };
312  int v_count[MAX_COMPONENTS] = { 0 };
313 
314  s->cur_scan = 0;
315  memset(s->upscale_h, 0, sizeof(s->upscale_h));
316  memset(s->upscale_v, 0, sizeof(s->upscale_v));
317 
318  ret = mjpeg_parse_len(s, &len, "sof");
319  if (ret < 0)
320  return ret;
321  if (len < 6)
322  return AVERROR_INVALIDDATA;
323  bits = bytestream2_get_byteu(&s->gB);
324 
325  if (bits > 16 || bits < 1) {
326  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
327  return AVERROR_INVALIDDATA;
328  }
329 
330  if (s->avctx->bits_per_raw_sample != bits) {
331  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
332  s->avctx->bits_per_raw_sample = bits;
333  init_idct(s->avctx);
334  }
335  if (s->pegasus_rct)
336  bits = 9;
337  if (bits == 9 && !s->pegasus_rct)
338  s->rct = 1; // FIXME ugly
339 
340  if (s->lossless && s->avctx->lowres) {
341  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
342  return AVERROR(ENOSYS);
343  }
344 
345  height = bytestream2_get_be16u(&s->gB);
346  width = bytestream2_get_be16u(&s->gB);
347 
348  // HACK for odd_height.mov
349  if (s->interlaced && s->width == width && s->height == height + 1)
350  height = s->height;
351 
352  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
353  if (av_image_check_size(width, height, 0, s->avctx) < 0)
354  return AVERROR_INVALIDDATA;
355 
356  if (!s->progressive && !s->ls) {
357  // A valid frame requires at least 1 bit for DC + 1 bit for AC for each 8x8 block.
358  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
359  return AVERROR_INVALIDDATA;
360  }
361 
362  nb_components = bytestream2_get_byteu(&s->gB);
363  if (nb_components <= 0 ||
364  nb_components > MAX_COMPONENTS)
365  return AVERROR_INVALIDDATA;
366  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
367  if (nb_components != s->nb_components) {
368  av_log(s->avctx, AV_LOG_ERROR,
369  "nb_components changing in interlaced picture\n");
370  return AVERROR_INVALIDDATA;
371  }
372  }
373  if (s->ls && !(bits <= 8 || nb_components == 1)) {
375  "JPEG-LS that is not <= 8 "
376  "bits/component or 16-bit gray");
377  return AVERROR_PATCHWELCOME;
378  }
379  len -= 6;
380  if (len != 3 * nb_components) {
381  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
382  return AVERROR_INVALIDDATA;
383  }
384 
385  s->nb_components = nb_components;
386  s->h_max = 1;
387  s->v_max = 1;
388  for (i = 0; i < nb_components; i++) {
389  /* component id */
390  s->component_id[i] = bytestream2_get_byteu(&s->gB);
391  uint8_t b = bytestream2_get_byteu(&s->gB);
392  h_count[i] = b >> 4;
393  v_count[i] = b & 0x0F;
394  /* compute hmax and vmax (only used in interleaved case) */
395  if (h_count[i] > s->h_max)
396  s->h_max = h_count[i];
397  if (v_count[i] > s->v_max)
398  s->v_max = v_count[i];
399  s->quant_index[i] = bytestream2_get_byteu(&s->gB);
400  if (s->quant_index[i] >= 4) {
401  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
402  return AVERROR_INVALIDDATA;
403  }
404  if (!h_count[i] || !v_count[i]) {
405  av_log(s->avctx, AV_LOG_ERROR,
406  "Invalid sampling factor in component %d %d:%d\n",
407  i, h_count[i], v_count[i]);
408  return AVERROR_INVALIDDATA;
409  }
410 
411  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
412  i, h_count[i], v_count[i],
413  s->component_id[i], s->quant_index[i]);
414  }
415  if ( nb_components == 4
416  && s->component_id[0] == 'C'
417  && s->component_id[1] == 'M'
418  && s->component_id[2] == 'Y'
419  && s->component_id[3] == 'K')
420  s->adobe_transform = 0;
421 
422  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
423  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
424  return AVERROR_PATCHWELCOME;
425  }
426 
427  if (s->bayer) {
428  if (nb_components == 2) {
429  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
430  width stored in their SOF3 markers is the width of each one. We only output
431  a single component, therefore we need to adjust the output image width. We
432  handle the deinterleaving (but not the debayering) in this file. */
433  width *= 2;
434  }
435  /* They can also contain 1 component, which is double the width and half the height
436  of the final image (rows are interleaved). We don't handle the decoding in this
437  file, but leave that to the TIFF/DNG decoder. */
438  }
439 
440  /* if different size, realloc/alloc picture */
441  if (width != s->width || height != s->height || bits != s->bits ||
442  memcmp(s->h_count, h_count, sizeof(h_count)) ||
443  memcmp(s->v_count, v_count, sizeof(v_count))) {
444  size_change = 1;
445 
446  s->width = width;
447  s->height = height;
448  s->bits = bits;
449  memcpy(s->h_count, h_count, sizeof(h_count));
450  memcpy(s->v_count, v_count, sizeof(v_count));
451  s->interlaced = 0;
452  s->got_picture = 0;
453 
454  /* test interlaced mode */
455  if (s->first_picture &&
456  (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
457  s->orig_height != 0 &&
458  s->height < ((s->orig_height * 3) / 4)) {
459  s->interlaced = 1;
460  s->bottom_field = s->interlace_polarity;
461  s->picture_ptr->flags |= AV_FRAME_FLAG_INTERLACED;
462  s->picture_ptr->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !s->interlace_polarity;
463  height *= 2;
464  }
465 
466  ret = ff_set_dimensions(s->avctx, width, height);
467  if (ret < 0)
468  return ret;
469 
470  if (s->avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
471  (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
472  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
473  s->orig_height < height)
474  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
475 
476  s->first_picture = 0;
477  } else {
478  size_change = 0;
479  }
480 
481  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
482  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
483  if (s->avctx->height <= 0)
484  return AVERROR_INVALIDDATA;
485  }
486  if (s->bayer && s->progressive) {
487  avpriv_request_sample(s->avctx, "progressively coded bayer picture");
488  return AVERROR_INVALIDDATA;
489  }
490 
491  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
492  if (s->progressive) {
493  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
494  return AVERROR_INVALIDDATA;
495  }
496  } else {
497  if (s->v_max == 1 && s->h_max == 1 && s->lossless == 1 && (nb_components == 3 || nb_components == 4))
498  s->rgb = 1;
499  else if (!s->lossless)
500  s->rgb = 0;
501  /* XXX: not complete test ! */
502  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
503  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
504  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
505  (s->h_count[3] << 4) | s->v_count[3];
506  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
507  /* NOTE we do not allocate pictures large enough for the possible
508  * padding of h/v_count being 4 */
509  if (!(pix_fmt_id & 0xD0D0D0D0))
510  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
511  if (!(pix_fmt_id & 0x0D0D0D0D))
512  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
513 
514  for (i = 0; i < 8; i++) {
515  int j = 6 + (i & 1) - (i & 6);
516  int is = (pix_fmt_id >> (4 * i)) & 0xF;
517  int js = (pix_fmt_id >> (4 * j)) & 0xF;
518 
519  if (is == 1 && js != 2 && (i < 2 || i > 5))
520  js = (pix_fmt_id >> ( 8 + 4 * (i & 1))) & 0xF;
521  if (is == 1 && js != 2 && (i < 2 || i > 5))
522  js = (pix_fmt_id >> (16 + 4 * (i & 1))) & 0xF;
523 
524  if (is == 1 && js == 2) {
525  if (i & 1) s->upscale_h[j / 2] = 1;
526  else s->upscale_v[j / 2] = 1;
527  }
528  }
529 
530  if (s->bayer) {
531  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
532  goto unk_pixfmt;
533  }
534 
535  switch (pix_fmt_id) {
536  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
537  if (!s->bayer)
538  goto unk_pixfmt;
539  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
540  break;
541  case 0x11111100:
542  if (s->rgb)
543  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
544  else {
545  if ( s->adobe_transform == 0
546  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
547  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
548  } else {
549  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
550  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
551  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
552  }
553  }
554  av_assert0(s->nb_components == 3);
555  break;
556  case 0x11111111:
557  if (s->rgb)
558  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
559  else {
560  if (s->adobe_transform == 0 && s->bits <= 8) {
561  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
562  } else {
563  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
564  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
565  }
566  }
567  av_assert0(s->nb_components == 4);
568  break;
569  case 0x11412100:
570  if (s->bits > 8)
571  goto unk_pixfmt;
572  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
573  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
574  s->upscale_h[0] = 4;
575  s->upscale_h[1] = 0;
576  s->upscale_h[2] = 1;
577  } else {
578  goto unk_pixfmt;
579  }
580  break;
581  case 0x22111122:
582  case 0x22111111:
583  if (s->adobe_transform == 0 && s->bits <= 8) {
584  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
585  s->upscale_v[1] = s->upscale_v[2] = 1;
586  s->upscale_h[1] = s->upscale_h[2] = 1;
587  } else if (s->adobe_transform == 2 && s->bits <= 8) {
588  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
589  s->upscale_v[1] = s->upscale_v[2] = 1;
590  s->upscale_h[1] = s->upscale_h[2] = 1;
591  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
592  } else {
593  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
594  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
595  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
596  }
597  av_assert0(s->nb_components == 4);
598  break;
599  case 0x12121100:
600  case 0x22122100:
601  case 0x21211100:
602  case 0x21112100:
603  case 0x22211200:
604  case 0x22221100:
605  case 0x22112200:
606  case 0x11222200:
607  if (s->bits > 8)
608  goto unk_pixfmt;
609  if (s->adobe_transform == 0 || s->component_id[0] == 'R' &&
610  s->component_id[1] == 'G' && s->component_id[2] == 'B') {
611  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
612  } else {
613  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
614  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
615  }
616  break;
617  case 0x11000000:
618  case 0x13000000:
619  case 0x14000000:
620  case 0x31000000:
621  case 0x33000000:
622  case 0x34000000:
623  case 0x41000000:
624  case 0x43000000:
625  case 0x44000000:
626  if (s->bits <= 8)
627  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
628  else
629  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
630  break;
631  case 0x12111100:
632  case 0x14121200:
633  case 0x14111100:
634  case 0x22211100:
635  case 0x22112100:
636  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
637  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
638  else
639  goto unk_pixfmt;
640  s->upscale_v[1] = s->upscale_v[2] = 1;
641  } else {
642  if (pix_fmt_id == 0x14111100)
643  s->upscale_v[1] = s->upscale_v[2] = 1;
644  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
645  else
646  goto unk_pixfmt;
647  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
648  }
649  break;
650  case 0x21111100:
651  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
652  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
653  else
654  goto unk_pixfmt;
655  s->upscale_h[1] = s->upscale_h[2] = 1;
656  } else {
657  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
658  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
659  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
660  }
661  break;
662  case 0x11311100:
663  if (s->bits > 8)
664  goto unk_pixfmt;
665  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B')
666  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
667  else
668  goto unk_pixfmt;
669  s->upscale_h[0] = s->upscale_h[2] = 2;
670  break;
671  case 0x31111100:
672  if (s->bits > 8)
673  goto unk_pixfmt;
674  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
675  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
676  s->upscale_h[1] = s->upscale_h[2] = 2;
677  break;
678  case 0x22121100:
679  case 0x22111200:
680  case 0x41211100:
681  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
682  else
683  goto unk_pixfmt;
684  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
685  break;
686  case 0x22111100:
687  case 0x23111100:
688  case 0x42111100:
689  case 0x24111100:
690  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
691  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
692  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
693  if (pix_fmt_id == 0x42111100) {
694  if (s->bits > 8)
695  goto unk_pixfmt;
696  s->upscale_h[1] = s->upscale_h[2] = 1;
697  } else if (pix_fmt_id == 0x24111100) {
698  if (s->bits > 8)
699  goto unk_pixfmt;
700  s->upscale_v[1] = s->upscale_v[2] = 1;
701  } else if (pix_fmt_id == 0x23111100) {
702  if (s->bits > 8)
703  goto unk_pixfmt;
704  s->upscale_v[1] = s->upscale_v[2] = 2;
705  }
706  break;
707  case 0x41111100:
708  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
709  else
710  goto unk_pixfmt;
711  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
712  break;
713  default:
714  unk_pixfmt:
715  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
716  memset(s->upscale_h, 0, sizeof(s->upscale_h));
717  memset(s->upscale_v, 0, sizeof(s->upscale_v));
718  return AVERROR_PATCHWELCOME;
719  }
720  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
721  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
722  return AVERROR_PATCHWELCOME;
723  }
724  if (s->ls) {
725  memset(s->upscale_h, 0, sizeof(s->upscale_h));
726  memset(s->upscale_v, 0, sizeof(s->upscale_v));
727  if (s->nb_components == 3) {
728  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
729  } else if (s->nb_components != 1) {
730  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
731  return AVERROR_PATCHWELCOME;
732  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
733  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
734  else if (s->bits <= 8)
735  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
736  else
737  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
738  }
739 
740  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
741  if (!s->pix_desc) {
742  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
743  return AVERROR_BUG;
744  }
745 
746  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
747  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
748  } else {
749  enum AVPixelFormat pix_fmts[] = {
750 #if CONFIG_MJPEG_NVDEC_HWACCEL
752 #endif
753 #if CONFIG_MJPEG_VAAPI_HWACCEL
755 #endif
756  s->avctx->pix_fmt,
758  };
759  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
760  if (s->hwaccel_pix_fmt < 0)
761  return AVERROR(EINVAL);
762 
763  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
764  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
765  }
766 
767  if (s->avctx->skip_frame == AVDISCARD_ALL) {
768  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
769  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
770  s->got_picture = 1;
771  return 0;
772  }
773 
774  av_frame_unref(s->picture_ptr);
775  ret = ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF);
776  if (ret < 0)
777  return ret;
778  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
779  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
780  s->got_picture = 1;
781 
782  // Lets clear the palette to avoid leaving uninitialized values in it
783  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
784  memset(s->picture_ptr->data[1], 0, 1024);
785 
786  for (i = 0; i < 4; i++)
787  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
788 
789  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
790  s->width, s->height, s->linesize[0], s->linesize[1],
791  s->interlaced, s->avctx->height);
792 
793  }
794 
795  if ((s->rgb && !s->lossless && !s->ls) ||
796  (!s->rgb && s->ls && s->nb_components > 1) ||
797  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
798  ) {
799  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
800  return AVERROR_PATCHWELCOME;
801  }
802 
803  /* totally blank picture as progressive JPEG will only add details to it */
804  if (s->progressive) {
805  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
806  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
807  for (i = 0; i < s->nb_components; i++) {
808  int size = bw * bh * s->h_count[i] * s->v_count[i];
809  av_freep(&s->blocks[i]);
810  av_freep(&s->last_nnz[i]);
811  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
812  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
813  if (!s->blocks[i] || !s->last_nnz[i])
814  return AVERROR(ENOMEM);
815  s->block_stride[i] = bw * s->h_count[i];
816  }
817  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
818  }
819 
820  if (s->avctx->hwaccel) {
821  const FFHWAccel *hwaccel = ffhwaccel(s->avctx->hwaccel);
822  s->hwaccel_picture_private =
823  av_mallocz(hwaccel->frame_priv_data_size);
824  if (!s->hwaccel_picture_private)
825  return AVERROR(ENOMEM);
826 
827  ret = hwaccel->start_frame(s->avctx, NULL, s->raw_image_buffer,
828  s->raw_image_buffer_size);
829  if (ret < 0)
830  return ret;
831  }
832 
833  return 0;
834 }
835 
836 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
837 {
838  int code;
839  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
840  if (code < 0 || code > 16) {
841  av_log(s->avctx, AV_LOG_ERROR,
842  "mjpeg_decode_dc: bad vlc: %d\n", dc_index);
843  return AVERROR_INVALIDDATA;
844  }
845 
846  *val = code ? get_xbits(&s->gb, code) : 0;
847  return 0;
848 }
849 
/* decode block and dequantize
 *
 * Huffman-decodes one 8x8 block of a baseline (sequential) scan,
 * dequantizing with 'quant_matrix' and storing coefficients into
 * 'block' through the permutated scantable.  The DC value is coded as
 * a difference against s->last_dc[component], which is updated here.
 * Only nonzero AC coefficients are written — NOTE(review): caller
 * presumably supplies a zeroed block; confirm at call sites.
 * Returns 0 on success or AVERROR_INVALIDDATA. */
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
                        int dc_index, int ac_index, uint16_t *quant_matrix)
{
    int code, i, j, level, val;

    /* DC coef */
    int ret = mjpeg_decode_dc(s, dc_index, &val);
    if (ret < 0)
        return ret;

    /* Dequantize the DC difference and add the predictor; the unsigned
     * multiply avoids signed-overflow UB on hostile input. */
    val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
    s->last_dc[component] = val;
    block[0] = av_clip_int16(val);
    /* AC coefs */
    i = 0;
    {
        OPEN_READER(re, &s->gb);
        do {
            UPDATE_CACHE(re, &s->gb);
            GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);

            /* High nibble: run of zero coefficients to skip;
             * low nibble: bit count of the following magnitude
             * (0 means EOB unless preceded by a ZRL run). */
            i += ((unsigned)code) >> 4;
            code &= 0xf;
            if (code) {
                // GET_VLC updates the cache if parsing reaches the second stage.
                // So we have at least MIN_CACHE_BITS - 9 > 15 bits left here
                // and don't need to refill the cache.
                {
                    /* Branchless sign extension: the top cached bit
                     * selects negative (JPEG's one's-complement style
                     * magnitude coding). */
                    int cache = GET_CACHE(re, &s->gb);
                    int sign = (~cache) >> 31;
                    level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
                }

                LAST_SKIP_BITS(re, &s->gb, code);

                if (i > 63) {
                    av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
                    return AVERROR_INVALIDDATA;
                }
                /* Store at the IDCT-permuted position, dequantized. */
                j = s->permutated_scantable[i];
                block[j] = level * quant_matrix[i];
            }
        } while (i < 63);
        CLOSE_READER(re, &s->gb);
    }

    return 0;
}
899 
901  int component, int dc_index,
902  uint16_t *quant_matrix, int Al)
903 {
904  unsigned val;
905  s->bdsp.clear_block(block);
906  int ret = mjpeg_decode_dc(s, dc_index, &val);
907  if (ret < 0)
908  return ret;
909 
910  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
911  s->last_dc[component] = val;
912  block[0] = val;
913  return 0;
914 }
915 
916 /* decode block and dequantize - progressive JPEG version */
918  uint8_t *last_nnz, int ac_index,
919  uint16_t *quant_matrix,
920  int Ss, int Se, int Al, int *EOBRUN)
921 {
922  int code, i, j, val, run;
923  unsigned level;
924 
925  if (*EOBRUN) {
926  (*EOBRUN)--;
927  return 0;
928  }
929 
930  {
931  OPEN_READER(re, &s->gb);
932  for (i = Ss; ; i++) {
933  UPDATE_CACHE(re, &s->gb);
934  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
935 
936  run = ((unsigned) code) >> 4;
937  code &= 0xF;
938  if (code) {
939  i += run;
940 
941  {
942  int cache = GET_CACHE(re, &s->gb);
943  int sign = (~cache) >> 31;
944  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
945  }
946 
947  LAST_SKIP_BITS(re, &s->gb, code);
948 
949  if (i >= Se) {
950  if (i == Se) {
951  j = s->permutated_scantable[Se];
952  block[j] = level * (quant_matrix[Se] << Al);
953  break;
954  }
955  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
956  return AVERROR_INVALIDDATA;
957  }
958  j = s->permutated_scantable[i];
959  block[j] = level * (quant_matrix[i] << Al);
960  } else {
961  if (run == 0xF) { // ZRL - skip 15 coefficients
962  i += 15;
963  if (i >= Se) {
964  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
965  return AVERROR_INVALIDDATA;
966  }
967  } else {
968  val = (1 << run);
969  if (run) {
970  // Given that GET_VLC reloads internally, we always
971  // have at least 16 bits in the cache here.
972  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
973  LAST_SKIP_BITS(re, &s->gb, run);
974  }
975  *EOBRUN = val - 1;
976  break;
977  }
978  }
979  }
980  CLOSE_READER(re, &s->gb);
981  }
982 
983  if (i > *last_nnz)
984  *last_nnz = i;
985 
986  return 0;
987 }
988 
/* Refinement-pass helper: read one correction bit and, if set, move the
 * already-nonzero coefficient block[j] one quantization step further
 * from zero.  'sign' replicates the coefficient's sign bit, so
 * (quant_matrix[i] ^ sign) - sign is +/- the step, scaled by Al.
 * Expects 're'/'s->gb' bit-reader state and 'i', 'Al' in scope. */
#define REFINE_BIT(j) { \
    UPDATE_CACHE(re, &s->gb); \
    sign = block[j] >> 15; \
    block[j] += SHOW_UBITS(re, &s->gb, 1) * \
                ((quant_matrix[i] ^ sign) - sign) << Al; \
    LAST_SKIP_BITS(re, &s->gb, 1); \
}

/* Refinement-pass helper: advance 'i' across 'run' zero-history
 * coefficients, consuming a REFINE_BIT for every previously-nonzero
 * coefficient passed along the way.  Once past 'last' (the highest
 * index refined so far) the remaining run is jumped in one step,
 * bailing out with -1 if it would exceed the spectral end 'Se'. */
#define ZERO_RUN \
for (; ; i++) { \
    if (i > last) { \
        i += run; \
        if (i > Se) { \
            av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
            return -1; \
        } \
        break; \
    } \
    j = s->permutated_scantable[i]; \
    if (block[j]) \
        REFINE_BIT(j) \
    else if (run-- == 0) \
        break; \
}
1013 
1014 /* decode block and dequantize - progressive JPEG refinement pass */
/* NOTE(review): the opening signature line of this static function
 * (presumably decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
 * ...) — it is called under that name in mjpeg_decode_scan_progressive_ac
 * below) was lost by the source extraction; the parameter list below is its
 * continuation.  Restore from the repository copy.
 *
 * Adds one bit of precision (Ah != 0 scan) to AC coefficients Ss..Se of a
 * single block.  *EOBRUN carries the remaining end-of-band run across
 * blocks; *last_nnz tracks the highest coded coefficient index for this
 * block.  Returns 0 on success, negative on a malformed run (via ZERO_RUN). */
1016  uint8_t *last_nnz,
1017  int ac_index, uint16_t *quant_matrix,
1018  int Ss, int Se, int Al, int *EOBRUN)
1019 {
1020  int code, i = Ss, j, sign, val, run;
1021  int last = FFMIN(Se, *last_nnz);
1022 
1023  OPEN_READER(re, &s->gb);
     /* Inside a pending EOB run: no new nonzero coefficients this block;
      * only the refinement loop at the bottom runs. */
1024  if (*EOBRUN) {
1025  (*EOBRUN)--;
1026  } else {
1027  for (; ; i++) {
1028  UPDATE_CACHE(re, &s->gb);
1029  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
1030 
          /* Low nibble nonzero: a coefficient newly becoming nonzero in
           * this pass; its magnitude is one quantization step, the next
           * bit `val` selects the sign (the XOR/subtract applies it). */
1031  if (code & 0xF) {
1032  run = ((unsigned) code) >> 4;
1033  val = SHOW_UBITS(re, &s->gb, 1);
1034  LAST_SKIP_BITS(re, &s->gb, 1);
1035  ZERO_RUN;
1036  j = s->permutated_scantable[i];
1037  val--;
1038  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
1039  if (i == Se) {
1040  if (i > *last_nnz)
1041  *last_nnz = i;
1042  CLOSE_READER(re, &s->gb);
1043  return 0;
1044  }
1045  } else {
1046  run = ((unsigned) code) >> 4;
          /* ZRL: a full zero-run, refining nonzero coefficients passed. */
1047  if (run == 0xF) {
1048  ZERO_RUN;
1049  } else {
          /* EOBn: run extension bits give the end-of-band run length. */
1050  val = run;
1051  run = (1 << run);
1052  if (val) {
1053  // Given that GET_VLC reloads internally, we always
1054  // have at least 16 bits in the cache here.
1055  run += SHOW_UBITS(re, &s->gb, val);
1056  LAST_SKIP_BITS(re, &s->gb, val);
1057  }
1058  *EOBRUN = run - 1;
1059  break;
1060  }
1061  }
1062  }
1063 
1064  if (i > *last_nnz)
1065  *last_nnz = i;
1066  }
1067 
     /* Refine the remaining already-nonzero coefficients up to `last`. */
1068  for (; i <= last; i++) {
1069  j = s->permutated_scantable[i];
1070  if (block[j])
1071  REFINE_BIT(j)
1072  }
1073  CLOSE_READER(re, &s->gb);
1074 
1075  return 0;
1076 }
1077 #undef REFINE_BIT
1078 #undef ZERO_RUN
1079 
1080 /* Handles 1 to 4 components */
/* NOTE(review): the opening signature line (presumably
 * static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s) — it is called
 * under that name from ff_mjpeg_decode_sos below) was lost by the source
 * extraction; restore from the repository copy.
 *
 * Lossless-JPEG scan decoder for RGB(A) and Bayer layouts: per row, DC
 * deltas are Huffman-decoded into a row buffer using the spatial predictor
 * (s->Ss), then the row is written out with the applicable colour
 * transform (RCT / Pegasus RCT / plain reorder).  Returns 0 on success,
 * a negative AVERROR on invalid data or allocation failure. */
1082 {
1083  int nb_components = s->nb_components_sos;
1084  int predictor = s->Ss;
1085  int point_transform = s->Al;
1086  int i, mb_x, mb_y;
1087  unsigned width;
1088  uint16_t (*buffer)[4];
1089  int left[4], top[4], topleft[4];
1090  const int linesize = s->linesize[0];
1091  const int mask = ((1 << s->bits) - 1) << point_transform;
1092  int resync_mb_y = 0;
1093  int resync_mb_x = 0;
1094  int vpred[6];
1095  int ret;
1096 
     /* Reject parameter combinations this path cannot handle: it requires
      * lossless mode with no subsampling, >=3 components unless Bayer
      * (<=2), and no RCT variants for Bayer data. */
1097  if (!s->bayer && s->nb_components < 3)
1098  return AVERROR_INVALIDDATA;
1099  if (s->bayer && s->nb_components > 2)
1100  return AVERROR_INVALIDDATA;
1101  if (s->nb_components <= 0 || s->nb_components > 4)
1102  return AVERROR_INVALIDDATA;
1103  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1104  return AVERROR_INVALIDDATA;
1105  if (s->bayer) {
1106  if (s->rct || s->pegasus_rct)
1107  return AVERROR_INVALIDDATA;
1108  }
1109 
1110 
     /* Vertical predictors for the first Bayer line start at half range. */
1111  for (i = 0; i < 6; i++)
1112  vpred[i] = 1 << (s->bits - 1);
1113 
1114  if (s->bayer)
1115  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1116  else
1117  width = s->mb_width;
1118 
1119  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1120  if (!s->ljpeg_buffer)
1121  return AVERROR(ENOMEM);
1122 
1123  buffer = s->ljpeg_buffer;
1124 
1125  for (i = 0; i < 4; i++)
1126  buffer[0][i] = 1 << (s->bits - 1);
1127 
1128  s->restart_count = -1;
1129 
1130  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1131  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1132 
1133  if (s->interlaced && s->bottom_field)
1134  ptr += linesize >> 1;
1135 
1136  for (i = 0; i < 4; i++)
1137  top[i] = left[i] = topleft[i] = buffer[0][i];
1138 
1139  for (mb_x = 0; mb_x < width; mb_x++) {
1140  int modified_predictor = predictor;
1141  int restart;
1142 
1143  ret = ff_mjpeg_handle_restart(s, &restart);
1144  if (ret < 0)
1145  return ret;
          /* On a restart marker the predictors reset to half range. */
1146  if (restart) {
1147  resync_mb_x = mb_x;
1148  resync_mb_y = mb_y;
1149  for (i = 0; i < 4; i++)
1150  top[i] = left[i] = topleft[i] = 1 << (s->bits - 1);
1151  }
1152 
1153  if (get_bits_left(&s->gb) < 1) {
1154  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1155  return AVERROR_INVALIDDATA;
1156  }
1157 
          /* First sample of a row/segment: force predictor 1 (left). */
1158  if (mb_y == resync_mb_y || mb_y == resync_mb_y + 1 && mb_x < resync_mb_x || !mb_x)
1159  modified_predictor = 1;
1160 
1161  for (i = 0; i < nb_components; i++) {
1162  int pred, dc;
1163 
1164  topleft[i] = top[i];
1165  top[i] = buffer[mb_x][i];
1166 
1167  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1168  if (ret < 0)
1169  return ret;
1170 
1171  if (!s->bayer || mb_x) {
1172  pred = left[i];
1173  } else { /* This path runs only for the first line in bayer images */
1174  vpred[i] += dc;
1175  pred = vpred[i] - dc;
1176  }
1177 
1178  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1179 
1180  left[i] = buffer[mb_x][i] =
1181  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1182  }
1183  }
     /* Write the decoded row out, applying the colour transform selected
      * by the stream flags (RCT / Pegasus RCT / component reorder). */
1184  if (s->rct && s->nb_components == 4) {
1185  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1186  ptr[4 * mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1187  ptr[4 * mb_x + 1] = buffer[mb_x][1] + ptr[4 * mb_x + 2];
1188  ptr[4 * mb_x + 3] = buffer[mb_x][2] + ptr[4 * mb_x + 2];
1189  ptr[4 * mb_x + 0] = buffer[mb_x][3];
1190  }
1191  } else if (s->nb_components == 4) {
1192  for (i = 0; i < nb_components; i++) {
1193  int c = s->comp_index[i];
1194  if (s->bits <= 8) {
1195  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1196  ptr[4 * mb_x + 3 - c] = buffer[mb_x][i];
1197  }
1198  } else if (s->bits == 9) {
1199  return AVERROR_PATCHWELCOME;
1200  } else {
1201  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1202  ((uint16_t*)ptr)[4 * mb_x + c] = buffer[mb_x][i];
1203  }
1204  }
1205  }
1206  } else if (s->rct) {
1207  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1208  ptr[3 * mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1209  ptr[3 * mb_x + 0] = buffer[mb_x][1] + ptr[3 * mb_x + 1];
1210  ptr[3 * mb_x + 2] = buffer[mb_x][2] + ptr[3 * mb_x + 1];
1211  }
1212  } else if (s->pegasus_rct) {
1213  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1214  ptr[3 * mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1215  ptr[3 * mb_x + 0] = buffer[mb_x][1] + ptr[3 * mb_x + 1];
1216  ptr[3 * mb_x + 2] = buffer[mb_x][2] + ptr[3 * mb_x + 1];
1217  }
1218  } else if (s->bayer) {
1219  if (s->bits <= 8)
1220  return AVERROR_PATCHWELCOME;
1221  if (nb_components == 1) {
1222  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1223  for (mb_x = 0; mb_x < width; mb_x++)
1224  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1225  } else if (nb_components == 2) {
1226  for (mb_x = 0; mb_x < width; mb_x++) {
1227  ((uint16_t*)ptr)[2 * mb_x + 0] = buffer[mb_x][0];
1228  ((uint16_t*)ptr)[2 * mb_x + 1] = buffer[mb_x][1];
1229  }
1230  }
1231  } else {
1232  for (i = 0; i < nb_components; i++) {
1233  int c = s->comp_index[i];
1234  if (s->bits <= 8) {
1235  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1236  ptr[3 * mb_x + 2 - c] = buffer[mb_x][i];
1237  }
1238  } else if (s->bits == 9) {
1239  return AVERROR_PATCHWELCOME;
1240  } else {
1241  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1242  ((uint16_t*)ptr)[3 * mb_x + 2 - c] = buffer[mb_x][i];
1243  }
1244  }
1245  }
1246  }
1247  }
1248  return 0;
1249 }
1250 
/* NOTE(review): the opening signature line (presumably
 * static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s) — called under
 * that name from ff_mjpeg_decode_sos below) was lost by the source
 * extraction; restore from the repository copy.
 *
 * Lossless-JPEG scan decoder for YUV-planar output, predicting directly
 * from previously written pixels in the picture planes.  The slow path
 * (border/resync rows, interlaced) handles toprow/leftcol predictor
 * fallbacks explicitly; the fast path always uses the full 2-D PREDICT.
 * Returns 0 on success, a negative AVERROR on invalid data. */
1252 {
1253  int predictor = s->Ss;
1254  int point_transform = s->Al;
1255  int nb_components = s->nb_components_sos;
1256  int i, mb_x, mb_y, mask;
     /* Round the sample depth up to a whole number of bytes (8 or 16);
      * point_transform absorbs the difference so values stay left-aligned. */
1257  int bits = (s->bits + 7) & ~7;
1258  int resync_mb_y = 0;
1259  int resync_mb_x = 0;
1260  int ret;
1261 
1262  point_transform += bits - s->bits;
1263  mask = ((1 << s->bits) - 1) << point_transform;
1264 
1265  av_assert0(nb_components >= 1 && nb_components <= 4);
1266 
1267  s->restart_count = -1;
1268 
1269  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1270  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1271  int restart;
1272  ret = ff_mjpeg_handle_restart(s, &restart);
1273  if (ret < 0)
1274  return ret;
1275  if (restart) {
1276  resync_mb_x = mb_x;
1277  resync_mb_y = mb_y;
1278  }
1279 
1280  if (get_bits_left(&s->gb) < 1) {
1281  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1282  return AVERROR_INVALIDDATA;
1283  }
1284 
          /* Slow path: picture border, the row(s) right after a resync,
           * or interlaced content, where neighbours may be unavailable. */
1285  if (!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y + 1 && mb_x < resync_mb_x || s->interlaced) {
1286  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y + 1 && mb_x < resync_mb_x;
1287  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1288  for (i = 0; i < nb_components; i++) {
1289  uint8_t *ptr;
1290  uint16_t *ptr16;
1291  int n, h, v, x, y, c, j, linesize;
1292  n = s->nb_blocks[i];
1293  c = s->comp_index[i];
1294  h = s->h_scount[i];
1295  v = s->v_scount[i];
1296  x = 0;
1297  y = 0;
1298  linesize = s->linesize[c];
1299 
1300  if (bits > 8) linesize /= 2;
1301 
1302  for (j = 0; j < n; j++) {
1303  int pred, dc;
1304 
1305  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1306  if (ret < 0)
1307  return ret;
1308 
1309  if ( h * mb_x + x >= s->width
1310  || v * mb_y + y >= s->height) {
1311  // Nothing to do
1312  } else if (bits <= 8) {
1313  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); // FIXME optimize this crap
1314  if (y == 0 && toprow) {
1315  if (x == 0 && leftcol) {
1316  pred = 1 << (bits - 1);
1317  } else {
1318  pred = ptr[-1];
1319  }
1320  } else {
1321  if (x == 0 && leftcol) {
1322  pred = ptr[-linesize];
1323  } else {
1324  PREDICT(pred, ptr[-linesize - 1], ptr[-linesize], ptr[-1], predictor);
1325  }
1326  }
1327 
1328  if (s->interlaced && s->bottom_field)
1329  ptr += linesize >> 1;
1330  pred &= mask;
1331  *ptr = pred + ((unsigned)dc << point_transform);
1332  } else {
1333  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2 * (linesize * (v * mb_y + y)) + 2 * (h * mb_x + x)); // FIXME optimize this crap
1334  if (y == 0 && toprow) {
1335  if (x == 0 && leftcol) {
1336  pred = 1 << (bits - 1);
1337  } else {
1338  pred = ptr16[-1];
1339  }
1340  } else {
1341  if (x == 0 && leftcol) {
1342  pred = ptr16[-linesize];
1343  } else {
1344  PREDICT(pred, ptr16[-linesize - 1], ptr16[-linesize], ptr16[-1], predictor);
1345  }
1346  }
1347 
1348  if (s->interlaced && s->bottom_field)
1349  ptr16 += linesize >> 1;
1350  pred &= mask;
1351  *ptr16 = pred + ((unsigned)dc << point_transform);
1352  }
1353  if (++x == h) {
1354  x = 0;
1355  y++;
1356  }
1357  }
1358  }
          /* Fast path: all neighbours exist, use the full predictor. */
1359  } else {
1360  for (i = 0; i < nb_components; i++) {
1361  uint8_t *ptr;
1362  uint16_t *ptr16;
1363  int n, h, v, x, y, c, j, linesize, dc;
1364  n = s->nb_blocks[i];
1365  c = s->comp_index[i];
1366  h = s->h_scount[i];
1367  v = s->v_scount[i];
1368  x = 0;
1369  y = 0;
1370  linesize = s->linesize[c];
1371 
1372  if (bits > 8) linesize /= 2;
1373 
1374  for (j = 0; j < n; j++) {
1375  int pred;
1376 
1377  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1378  if (ret < 0)
1379  return ret;
1380 
1381  if ( h * mb_x + x >= s->width
1382  || v * mb_y + y >= s->height) {
1383  // Nothing to do
1384  } else if (bits <= 8) {
1385  ptr = s->picture_ptr->data[c] +
1386  (linesize * (v * mb_y + y)) +
1387  (h * mb_x + x); // FIXME optimize this crap
1388  PREDICT(pred, ptr[-linesize - 1], ptr[-linesize], ptr[-1], predictor);
1389 
1390  pred &= mask;
1391  *ptr = pred + ((unsigned)dc << point_transform);
1392  } else {
1393  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2 * (linesize * (v * mb_y + y)) + 2 * (h * mb_x + x)); // FIXME optimize this crap
1394  PREDICT(pred, ptr16[-linesize - 1], ptr16[-linesize], ptr16[-1], predictor);
1395 
1396  pred &= mask;
1397  *ptr16 = pred + ((unsigned)dc << point_transform);
1398  }
1399 
1400  if (++x == h) {
1401  x = 0;
1402  y++;
1403  }
1404  }
1405  }
1406  }
1407  }
1408  }
1409  return 0;
1410 }
1411 
/* NOTE(review): the opening signature line (presumably
 * mjpeg_copy_block(MJpegDecodeContext *s, ... — called under that name from
 * mjpeg_decode_scan below) was lost by the source extraction; the
 * parameters here are its continuation.
 *
 * Copies one decoded block from the reference frame into the output,
 * scaled according to the lowres factor: 0 = full 8x8 via s->copy_block,
 * 1 = 4x4, 2 = 2x2, 3 = a single pixel. */
1413  uint8_t *dst, const uint8_t *src,
1414  int linesize, int lowres)
1415 {
1416  switch (lowres) {
1417  case 0: s->copy_block(dst, src, linesize, 8);
1418  break;
1419  case 1: copy_block4(dst, src, linesize, linesize, 4);
1420  break;
1421  case 2: copy_block2(dst, src, linesize, linesize, 2);
1422  break;
1423  case 3: *dst = *src;
1424  break;
1425  }
1426 }
1427 
1428 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1429 {
1430  int block_x, block_y;
1431  int size = 8 >> s->avctx->lowres;
1432  if (s->bits > 8) {
1433  for (block_y = 0; block_y < size; block_y++)
1434  for (block_x = 0; block_x < size; block_x++)
1435  *(uint16_t*)(ptr + 2 * block_x + block_y * linesize) <<= 16 - s->bits;
1436  } else {
1437  for (block_y = 0; block_y < size; block_y++)
1438  for (block_x = 0; block_x < size; block_x++)
1439  *(ptr + block_x + block_y * linesize) <<= 8 - s->bits;
1440  }
1441 }
1442 
/* NOTE(review): the opening signature line (presumably
 * static int mjpeg_decode_scan(MJpegDecodeContext *s) — called under that
 * name from ff_mjpeg_decode_sos below) was lost by the source extraction;
 * restore from the repository copy.
 *
 * Decodes one baseline/progressive-DC DCT scan, MCU by MCU.  For MXPEG,
 * an mb_bitmask selects which macroblocks are coded; the others are copied
 * from the reference frame.  In progressive mode only the DC coefficient
 * is handled here (AC passes go through mjpeg_decode_scan_progressive_ac).
 * Returns 0 on success, a negative AVERROR on bitstream errors. */
1444 {
1445  int nb_components = s->nb_components_sos;
1446  int Ah = s->Ah;
1447  int Al = s->Al;
1448  const uint8_t *mb_bitmask = NULL;
1449  const AVFrame *reference = NULL;
1450  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1451  uint8_t *data[MAX_COMPONENTS];
1452  const uint8_t *reference_data[MAX_COMPONENTS];
1453  int linesize[MAX_COMPONENTS];
1454  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1455  int bytes_per_pixel = 1 + (s->bits > 8);
1456  int ret;
1457 
1458  if (s->avctx->codec_id == AV_CODEC_ID_MXPEG) {
1459  mb_bitmask = s->mb_bitmask;
1460  reference = s->reference;
1461  }
1462 
1463  if (mb_bitmask) {
1464  if (s->mb_bitmask_size != (s->mb_width * s->mb_height + 7) >> 3) {
1465  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1466  return AVERROR_INVALIDDATA;
1467  }
1468  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1469  }
1470 
1471  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1472  &chroma_v_shift);
1473  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1474  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1475 
1476  for (i = 0; i < nb_components; i++) {
1477  int c = s->comp_index[i];
1478  data[c] = s->picture_ptr->data[c];
1479  reference_data[c] = reference ? reference->data[c] : NULL;
1480  linesize[c] = s->linesize[c];
1481  s->coefs_finished[c] |= 1;
1482  }
1483 
1484 next_field:
1485  s->restart_count = -1;
1486 
1487  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1488  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1489  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1490  int restart;
1491 
1492  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
1493  if (s->restart_count < 0) {
          /* NOTE(review): a statement (doxygen line 1494) is missing here
           * in this extraction — presumably the call that assigns `ret`
           * for the THP restart setup.  Restore from the repository copy. */
1495  if (ret < 0)
1496  return ret;
1497  }
1498  restart = ff_mjpeg_should_restart(s);
1499  if (restart)
1500  align_get_bits(&s->gb);
1501  } else {
1502  ret = ff_mjpeg_handle_restart(s, &restart);
1503  if (ret < 0)
1504  return ret;
1505  }
          /* After a restart marker the DC predictors reset. */
1506  if (restart) {
1507  for (i = 0; i < nb_components; i++)
1508  s->last_dc[i] = (4 << s->bits);
1509  }
1510 
1511  if (get_bits_left(&s->gb) < 0) {
1512  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1513  -get_bits_left(&s->gb));
1514  return AVERROR_INVALIDDATA;
1515  }
1516  for (i = 0; i < nb_components; i++) {
1517  uint8_t *ptr;
1518  int n, h, v, x, y, c, j;
1519  int block_offset;
1520  n = s->nb_blocks[i];
1521  c = s->comp_index[i];
1522  h = s->h_scount[i];
1523  v = s->v_scount[i];
1524  x = 0;
1525  y = 0;
1526  for (j = 0; j < n; j++) {
1527  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1528  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1529 
1530  if (s->interlaced && s->bottom_field)
1531  block_offset += linesize[c] >> 1;
          /* ptr is NULL for blocks that fall entirely outside the
           * visible picture area; they are still entropy-decoded. */
1532  if ( 8 * (h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1533  && 8 * (v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1534  ptr = data[c] + block_offset;
1535  } else
1536  ptr = NULL;
1537  if (!s->progressive) {
1538  if (copy_mb) {
1539  if (ptr)
1540  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1541  linesize[c], s->avctx->lowres);
1542 
1543  } else {
1544  s->bdsp.clear_block(s->block);
1545  if (decode_block(s, s->block, i,
1546  s->dc_index[i], s->ac_index[i],
1547  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1548  av_log(s->avctx, AV_LOG_ERROR,
1549  "error y=%d x=%d\n", mb_y, mb_x);
1550  return AVERROR_INVALIDDATA;
1551  }
1552  if (ptr && linesize[c]) {
1553  s->idsp.idct_put(ptr, linesize[c], s->block);
1554  if (s->bits & 7)
1555  shift_output(s, ptr, linesize[c]);
1556  }
1557  }
1558  } else {
          /* Progressive DC pass: first pass decodes the DC value,
           * refinement passes (Ah != 0) add one bit. */
1559  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1560  (h * mb_x + x);
1561  int16_t *block = s->blocks[c][block_idx];
1562  if (Ah)
1563  block[0] += get_bits1(&s->gb) *
1564  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1565  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1566  s->quant_matrixes[s->quant_sindex[i]],
1567  Al) < 0) {
1568  av_log(s->avctx, AV_LOG_ERROR,
1569  "error y=%d x=%d\n", mb_y, mb_x);
1570  return AVERROR_INVALIDDATA;
1571  }
1572  }
1573  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1574  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1575  mb_x, mb_y, x, y, c, s->bottom_field,
1576  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1577  if (++x == h) {
1578  x = 0;
1579  y++;
1580  }
1581  }
1582  }
1583  }
1584  }
1585 
     /* AVRn interlaced: an RST1 marker right after the scan announces the
      * second field — flip bottom_field and decode it too. */
1586  if (s->interlaced &&
1587  bytestream2_get_bytes_left(&s->gB) > 2 &&
1588  bytestream2_tell(&s->gB) > 2 &&
1589  s->gB.buffer[-2] == 0xFF &&
1590  s->gB.buffer[-1] == 0xD1) {
1591  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1592  s->bottom_field ^= 1;
1593 
1594  goto next_field;
1595  }
1596 
1597  return 0;
1598 }
1599 
/* NOTE(review): the opening signature line (presumably
 * static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s) —
 * called under that name from ff_mjpeg_decode_sos below) was lost by the
 * source extraction; restore from the repository copy.
 *
 * Decodes one progressive AC scan (single component): either the first
 * pass for coefficients Ss..Se (decode_block_progressive) or a one-bit
 * refinement pass when Ah != 0 (decode_block_refinement).  The EOB run is
 * carried across blocks and reset at restart markers.  Returns 0 on
 * success, a negative AVERROR on invalid data. */
1601 {
1602  int Ss = s->Ss;
1603  int Se = s->Se;
1604  int Ah = s->Ah;
1605  int Al = s->Al;
1606  int mb_x, mb_y;
1607  int EOBRUN = 0;
1608  int c = s->comp_index[0];
1609  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1610 
1611  av_assert0(Ss >= 0 && Ah >= 0 && Al >= 0);
1612  if (Se < Ss || Se > 63) {
1613  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", Ss, Se);
1614  return AVERROR_INVALIDDATA;
1615  }
1616 
1617  // s->coefs_finished is a bitmask for coefficients coded
1618  // Ss and Se are parameters telling start and end coefficients
1619  s->coefs_finished[c] |= (2ULL << Se) - (1ULL << Ss);
1620 
1621  s->restart_count = -1;
1622 
1623  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1624  int block_idx = mb_y * s->block_stride[c];
1625  int16_t (*block)[64] = &s->blocks[c][block_idx];
1626  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1627  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1628  int ret;
1629  int restart;
1630  ret = ff_mjpeg_handle_restart(s, &restart);
1631  if (ret < 0)
1632  return ret;
          /* A restart marker terminates any pending end-of-band run. */
1633  if (restart)
1634  EOBRUN = 0;
1635 
1636  if (Ah)
1637  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1638  quant_matrix, Ss, Se, Al, &EOBRUN);
1639  else
1640  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1641  quant_matrix, Ss, Se, Al, &EOBRUN);
1642 
          /* NOTE(review): the body of this `if` (doxygen line 1644) is
           * missing in this extraction — presumably an assignment marking
           * the overread as an error (e.g. ret = AVERROR_INVALIDDATA;).
           * Restore from the repository copy. */
1643  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1645  if (ret < 0) {
1646  av_log(s->avctx, AV_LOG_ERROR,
1647  "error y=%d x=%d\n", mb_y, mb_x);
1648  return AVERROR_INVALIDDATA;
1649  }
1650  }
1651  }
1652  return 0;
1653 }
1654 
/* NOTE(review): the opening signature line (presumably a static void
 * function running the final IDCT over the accumulated progressive
 * coefficient planes) was lost by the source extraction; restore from the
 * repository copy.
 *
 * Runs idct_put over every block of every component accumulated in
 * s->blocks during the progressive passes, writing the pixels into the
 * picture; warns if a component's coefficient bitmask is incomplete. */
1656 {
1657  int mb_x, mb_y;
1658  int c;
1659  const int bytes_per_pixel = 1 + (s->bits > 8);
1660  const int block_size = s->lossless ? 1 : 8;
1661 
1662  for (c = 0; c < s->nb_components; c++) {
1663  uint8_t *data = s->picture_ptr->data[c];
1664  int linesize = s->linesize[c];
1665  int h = s->h_max / s->h_count[c];
1666  int v = s->v_max / s->v_count[c];
1667  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1668  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1669 
     /* Any zero bit in coefs_finished means some scan never arrived. */
1670  if (~s->coefs_finished[c])
1671  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1672 
1673  if (s->interlaced && s->bottom_field)
1674  data += linesize >> 1;
1675 
1676  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1677  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1678  int block_idx = mb_y * s->block_stride[c];
1679  int16_t (*block)[64] = &s->blocks[c][block_idx];
1680  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1681  s->idsp.idct_put(ptr, linesize, *block);
1682  if (s->bits & 7)
1683  shift_output(s, ptr, linesize);
1684  ptr += bytes_per_pixel * 8 >> s->avctx->lowres;
1685  }
1686  }
1687  }
1688 }
1689 
/* NOTE(review): the opening signature line (presumably
 * int ff_mjpeg_decode_sos(MJpegDecodeContext *s, ...) — this is the public
 * SOS-marker entry point) was lost by the source extraction; restore from
 * the repository copy.
 *
 * Parses an SOS (start of scan) header — component selection, Huffman
 * table indices, Ss/Se/Ah/Al — validates it against the SOF state, then
 * dispatches to the matching scan decoder (hwaccel slice, JPEG-LS,
 * lossless RGB/YUV, progressive AC, or baseline).  Returns 0 on success,
 * a negative AVERROR on malformed headers or scan errors. */
1691 {
1692  int len, i, h, v;
1693  int index, id, ret;
1694  const int block_size = s->lossless ? 1 : 8;
1695 
1696  if (!s->got_picture) {
1697  av_log(s->avctx, AV_LOG_WARNING,
1698  "Can not process SOS before SOF, skipping\n");
1699  return AVERROR_INVALIDDATA;
1700  }
1701 
1702  ret = mjpeg_parse_len(s, &len, "sos");
1703  if (ret < 0)
1704  return ret;
1705  if (len < 1)
1706  return AVERROR_INVALIDDATA;
1707  s->nb_components_sos = bytestream2_get_byteu(&s->gB);
     /* NOTE(review): the first line of this call (doxygen line 1709) is
      * missing in this extraction — presumably
      * avpriv_report_missing_feature(s->avctx, — the arguments below are
      * its continuation.  Restore from the repository copy. */
1708  if (s->nb_components_sos == 0 || s->nb_components_sos > MAX_COMPONENTS) {
1710  "decode_sos: nb_components (%d)",
1711  s->nb_components_sos);
1712  return AVERROR_PATCHWELCOME;
1713  }
1714  if (len != 4 + 2 * s->nb_components_sos) {
1715  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: len(%d) mismatch %d components\n", len, s->nb_components_sos);
1716  return AVERROR_INVALIDDATA;
1717  }
1718  for (i = 0; i < s->nb_components_sos; i++) {
1719  id = bytestream2_get_byteu(&s->gB);
1720  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1721  /* find component index */
1722  for (index = 0; index < s->nb_components; index++)
1723  if (id == s->component_id[index])
1724  break;
1725  if (index == s->nb_components) {
1726  av_log(s->avctx, AV_LOG_ERROR,
1727  "decode_sos: index(%d) out of components\n", index);
1728  return AVERROR_INVALIDDATA;
1729  }
1730  /* Metasoft MJPEG codec has Cb and Cr swapped */
1731  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1732  && s->nb_components_sos == 3 && s->nb_components == 3 && i)
1733  index = 3 - i;
1734 
1735  s->quant_sindex[i] = s->quant_index[index];
1736  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1737  s->h_scount[i] = s->h_count[index];
1738  s->v_scount[i] = s->v_count[index];
1739 
1740  s->comp_index[i] = index;
1741 
     /* Td/Ta byte: DC table index in the high nibble, AC in the low. */
1742  uint8_t b = bytestream2_get_byteu(&s->gB);
1743  s->dc_index[i] = b >> 4;
1744  s->ac_index[i] = b & 0x0F;
1745 
1746  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1747  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1748  goto out_of_range;
1749  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1750  goto out_of_range;
1751  }
1752 
1753  s->Ss = bytestream2_get_byteu(&s->gB); /* JPEG Ss / lossless JPEG predictor / JPEG-LS NEAR */
1754  s->Se = bytestream2_get_byteu(&s->gB); /* JPEG Se / JPEG-LS ILV */
1755  uint8_t b = bytestream2_get_byteu(&s->gB);
1756  s->Ah = b >> 4; /* Ah */
1757  s->Al = b & 0x0F; /* Al */
1758 
     /* Derive the MCU grid for this scan from the sampling factors. */
1759  if (s->nb_components_sos > 1) {
1760  /* interleaved stream */
1761  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1762  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1763  } else if (!s->ls) { /* skip this for JPEG-LS */
1764  h = s->h_max / s->h_scount[0];
1765  v = s->v_max / s->v_scount[0];
1766  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1767  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1768  s->nb_blocks[0] = 1;
1769  s->h_scount[0] = 1;
1770  s->v_scount[0] = 1;
1771  }
1772 
1773  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1774  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1775  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1776  s->Ss, s->Al, s->Se, s->bits, s->mjpb_skiptosod,
1777  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), s->nb_components_sos);
1778 
1779 
1780  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1781  if (s->mjpb_skiptosod)
1782  bytestream2_skip(&s->gB, s->mjpb_skiptosod);
1783 
     /* Dispatch: hardware decoders take the raw scan bytes directly. */
1784  if (s->avctx->hwaccel) {
1785  const uint8_t *buf_ptr;
1786  size_t buf_size;
1787 
1788  mjpeg_find_raw_scan_data(s, &buf_ptr, &buf_size);
1789 
1790  ret = FF_HW_CALL(s->avctx, decode_slice, buf_ptr, buf_size);
1791  if (ret < 0)
1792  return ret;
1793 
1794  } else {
1795  if (s->lossless) {
1796  av_assert0(s->picture_ptr == s->picture);
1797  if (CONFIG_JPEGLS_DECODER && s->ls) {
1798  if ((ret = ff_jpegls_decode_picture(s)) < 0)
1799  return ret;
1800  } else {
1801  if (s->rgb || s->bayer) {
1802  if ((ret = ljpeg_decode_rgb_scan(s)) < 0)
1803  return ret;
1804  } else {
1805  if ((ret = ljpeg_decode_yuv_scan(s)) < 0)
1806  return ret;
1807  }
1808  }
1809  } else {
1810  if (s->progressive && s->Ss) {
1811  av_assert0(s->picture_ptr == s->picture);
1812  if ((ret = mjpeg_decode_scan_progressive_ac(s)) < 0)
1813  return ret;
1814  } else {
1815  if ((ret = mjpeg_decode_scan(s)) < 0)
1816  return ret;
1817  }
1818  }
1819  }
1820 
1821  if (s->avctx->codec_id == AV_CODEC_ID_MEDIA100 ||
1822  s->avctx->codec_id == AV_CODEC_ID_MJPEGB ||
1823  s->avctx->codec_id == AV_CODEC_ID_THP) {
1824  /* Add the amount of bits read from the unescaped image data buffer
1825  * into the GetByteContext. */
1826  bytestream2_skipu(&s->gB, (get_bits_count(&s->gb) + 7) / 8);
1827  }
1828 
1829  return 0;
1830  out_of_range:
1831  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1832  return AVERROR_INVALIDDATA;
1833 }
1834 
/* NOTE(review): the opening signature line (presumably
 * static int mjpeg_decode_dri(MJpegDecodeContext *s)) was lost by the
 * source extraction; restore from the repository copy.
 *
 * Parses a DRI (define restart interval) marker: the segment length must
 * be exactly 4, followed by the 16-bit restart interval in MCUs.
 * Returns 0 on success, AVERROR_INVALIDDATA on a malformed segment. */
1836 {
1837  if (bytestream2_get_be16u(&s->gB) != 4)
1838  return AVERROR_INVALIDDATA;
1839  s->restart_interval = bytestream2_get_be16u(&s->gB);
1840  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1841  s->restart_interval);
1842 
1843  return 0;
1844 }
1845 
1847 {
1848  int len, id, i;
1849 
1850  int ret = mjpeg_parse_len(s, &len, "app");
1851  if (ret < 0)
1852  return AVERROR_INVALIDDATA;
1853 
1854  if (len < 4) {
1855  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1856  return AVERROR_INVALIDDATA;
1857  av_log(s->avctx, AV_LOG_VERBOSE, "skipping APPx stub (len=%" PRId32 ")\n", len);
1858  goto out;
1859  }
1860 
1861  id = bytestream2_get_be32u(&s->gB);
1862  len -= 4;
1863 
1864  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1865  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1866  av_fourcc2str(av_bswap32(id)), id, len);
1867 
1868  /* This fourcc is used by non-avid files too, it holds some
1869  information, but it's always present in AVID-created files. */
1870  if (id == AV_RB32("AVI1")) {
1871  /* structure:
1872  4bytes AVI1
1873  1bytes polarity
1874  1bytes always zero
1875  4bytes field_size
1876  4bytes field_size_less_padding
1877  */
1878  if (len < 1)
1879  goto out;
1880  i = bytestream2_get_byteu(&s->gB); len--;
1881  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1882  goto out;
1883  }
1884 
1885  if (id == AV_RB32("JFIF")) {
1886  int t_w, t_h, v1, v2;
1887  if (len < 8)
1888  goto out;
1889  bytestream2_skipu(&s->gB, 1); /* the trailing zero-byte */
1890  v1 = bytestream2_get_byteu(&s->gB);
1891  v2 = bytestream2_get_byteu(&s->gB);
1892  bytestream2_skipu(&s->gB, 1);
1893 
1894  s->avctx->sample_aspect_ratio.num = bytestream2_get_be16u(&s->gB);
1895  s->avctx->sample_aspect_ratio.den = bytestream2_get_be16u(&s->gB);
1896  if ( s->avctx->sample_aspect_ratio.num <= 0
1897  || s->avctx->sample_aspect_ratio.den <= 0) {
1898  s->avctx->sample_aspect_ratio.num = 0;
1899  s->avctx->sample_aspect_ratio.den = 1;
1900  }
1901 
1902  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1903  av_log(s->avctx, AV_LOG_INFO,
1904  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1905  v1, v2,
1906  s->avctx->sample_aspect_ratio.num,
1907  s->avctx->sample_aspect_ratio.den);
1908 
1909  len -= 8;
1910  if (len >= 2) {
1911  t_w = bytestream2_get_byteu(&s->gB);
1912  t_h = bytestream2_get_byteu(&s->gB);
1913  if (t_w && t_h) {
1914  /* skip thumbnail */
1915  if (len - 10 - (t_w * t_h * 3) > 0)
1916  len -= t_w * t_h * 3;
1917  }
1918  len -= 2;
1919  }
1920  goto out;
1921  }
1922 
1923  if ( id == AV_RB32("Adob")
1924  && len >= 8
1925  && bytestream2_peek_byteu(&s->gB) == 'e'
1926  && bytestream2_peek_be32u(&s->gB) != AV_RB32("e_CM")) {
1927  bytestream2_skipu(&s->gB, 1); /* 'e' */
1928  bytestream2_skipu(&s->gB, 2); /* version */
1929  bytestream2_skipu(&s->gB, 2); /* flags0 */
1930  bytestream2_skipu(&s->gB, 2); /* flags1 */
1931  s->adobe_transform = bytestream2_get_byteu(&s->gB);
1932  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1933  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1934  len -= 8;
1935  goto out;
1936  }
1937 
1938  if (id == AV_RB32("LJIF")) {
1939  int rgb = s->rgb;
1940  int pegasus_rct = s->pegasus_rct;
1941  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1942  av_log(s->avctx, AV_LOG_INFO,
1943  "Pegasus lossless jpeg header found\n");
1944  if (len < 9)
1945  goto out;
1946  bytestream2_skipu(&s->gB, 2); /* version ? */
1947  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1948  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1949  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1950  switch (i = bytestream2_get_byteu(&s->gB)) {
1951  case 1:
1952  rgb = 1;
1953  pegasus_rct = 0;
1954  break;
1955  case 2:
1956  rgb = 1;
1957  pegasus_rct = 1;
1958  break;
1959  default:
1960  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1961  }
1962 
1963  len -= 9;
1964  if (s->bayer)
1965  goto out;
1966  if (s->got_picture)
1967  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1968  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1969  goto out;
1970  }
1971 
1972  s->rgb = rgb;
1973  s->pegasus_rct = pegasus_rct;
1974 
1975  goto out;
1976  }
1977  if (id == AV_RL32("colr") && len > 0) {
1978  s->colr = bytestream2_get_byteu(&s->gB);
1979  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1980  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1981  len--;
1982  goto out;
1983  }
1984  if (id == AV_RL32("xfrm") && len > 0) {
1985  s->xfrm = bytestream2_get_byteu(&s->gB);
1986  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1987  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1988  len--;
1989  goto out;
1990  }
1991 
1992  /* JPS extension by VRex */
1993  if (start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1994  int flags, layout, type;
1995  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1996  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1997 
1998  bytestream2_skipu(&s->gB, 4); len -= 4; /* JPS_ */
1999  bytestream2_skipu(&s->gB, 2); len -= 2; /* block length */
2000  bytestream2_skipu(&s->gB, 1); /* reserved */
2001  flags = bytestream2_get_byteu(&s->gB);
2002  layout = bytestream2_get_byteu(&s->gB);
2003  type = bytestream2_get_byteu(&s->gB);
2004  len -= 4;
2005 
2006  av_freep(&s->stereo3d);
2007  s->stereo3d = av_stereo3d_alloc();
2008  if (!s->stereo3d) {
2009  goto out;
2010  }
2011  if (type == 0) {
2012  s->stereo3d->type = AV_STEREO3D_2D;
2013  } else if (type == 1) {
2014  switch (layout) {
2015  case 0x01:
2016  s->stereo3d->type = AV_STEREO3D_LINES;
2017  break;
2018  case 0x02:
2019  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2020  break;
2021  case 0x03:
2022  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2023  break;
2024  }
2025  if (!(flags & 0x04)) {
2026  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2027  }
2028  }
2029  goto out;
2030  }
2031 
2032  /* EXIF metadata */
2033  if (start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2034  int ret;
2035 
2036  bytestream2_skipu(&s->gB, 2); // skip padding
2037  len -= 2;
2038 
2039  if (s->exif_metadata.entries) {
2040  av_log(s->avctx, AV_LOG_WARNING, "multiple EXIF\n");
2041  goto out;
2042  }
2043 
2044  ret = av_exif_parse_buffer(s->avctx, s->gB.buffer, len, &s->exif_metadata, AV_EXIF_TIFF_HEADER);
2045  if (ret < 0) {
2046  av_log(s->avctx, AV_LOG_WARNING, "unable to parse EXIF buffer\n");
2047  goto out;
2048  }
2049 
2050  bytestream2_skipu(&s->gB, ret);
2051  len -= ret;
2052 
2053  goto out;
2054  }
2055 
2056  /* Apple MJPEG-A */
2057  if ((start_code == APP1) && (len > (0x28 - 8))) {
2058  id = bytestream2_get_be32u(&s->gB);
2059  len -= 4;
2060  /* Apple MJPEG-A */
2061  if (id == AV_RB32("mjpg")) {
2062  /* structure:
2063  4bytes field size
2064  4bytes pad field size
2065  4bytes next off
2066  4bytes quant off
2067  4bytes huff off
2068  4bytes image off
2069  4bytes scan off
2070  4bytes data off
2071  */
2072  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2073  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2074  }
2075  }
2076 
2077  if (start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2078  int id2;
2079  unsigned seqno;
2080  unsigned nummarkers;
2081 
2082  id = bytestream2_get_be32u(&s->gB);
2083  id2 = bytestream2_get_be24u(&s->gB);
2084  len -= 7;
2085  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2086  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2087  goto out;
2088  }
2089 
2090  bytestream2_skipu(&s->gB, 1);
2091  seqno = bytestream2_get_byteu(&s->gB);
2092  len -= 2;
2093  if (seqno == 0) {
2094  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2095  goto out;
2096  }
2097 
2098  nummarkers = bytestream2_get_byteu(&s->gB);
2099  len -= 1;
2100  if (nummarkers == 0) {
2101  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2102  goto out;
2103  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2104  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2105  goto out;
2106  } else if (seqno > nummarkers) {
2107  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2108  goto out;
2109  }
2110 
2111  /* Allocate if this is the first APP2 we've seen. */
2112  if (s->iccnum == 0) {
2113  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2114  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2115  return AVERROR(ENOMEM);
2116  }
2117  s->iccnum = nummarkers;
2118  }
2119 
2120  if (s->iccentries[seqno - 1].data) {
2121  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2122  goto out;
2123  }
2124 
2125  s->iccentries[seqno - 1].length = len;
2126  s->iccentries[seqno - 1].data = av_malloc(len);
2127  if (!s->iccentries[seqno - 1].data) {
2128  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2129  return AVERROR(ENOMEM);
2130  }
2131 
2132  bytestream2_get_bufferu(&s->gB, s->iccentries[seqno - 1].data, len);
2133  len = 0;
2134  s->iccread++;
2135 
2136  if (s->iccread > s->iccnum)
2137  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2138  }
2139 
2140 out:
2141  /* slow but needed for extreme adobe jpegs */
2142  if (len < 0)
2143  av_log(s->avctx, AV_LOG_ERROR,
2144  "mjpeg: error, decode_app parser read over the end\n");
2145  if (len > 0)
2146  bytestream2_skipu(&s->gB, len);
2147 
2148  return 0;
2149 }
2150 
/* COM (comment) marker handler: reads the length-prefixed comment string,
 * logs it, and applies workarounds keyed on known encoder signatures.
 * NOTE(review): the signature line was lost in extraction; from the call
 * site in the frame parser this is presumably
 * static int mjpeg_decode_com(MJpegDecodeContext *s) — confirm upstream. */
2152 {
2153  int len;
2154  int ret = mjpeg_parse_len(s, &len, "com");
2155  if (ret < 0)
2156  return ret;
2157  if (!len)
2158  return 0;
2159 
 /* Copy the payload and NUL-terminate it; a single trailing newline is
  * stripped so the exact string comparisons below can match. */
2160  int i;
2161  char *cbuf = av_malloc(len + 1);
2162  if (!cbuf)
2163  return AVERROR(ENOMEM);
2164 
2165  for (i = 0; i < len; i++)
2166  cbuf[i] = bytestream2_get_byteu(&s->gB);
2167  if (cbuf[i - 1] == '\n')
2168  cbuf[i - 1] = 0;
2169  else
2170  cbuf[i] = 0;
2171 
2172  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2173  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2174 
2175  /* buggy avid, it puts EOI only at every 10th frame */
2176  if (!strncmp(cbuf, "AVID", 4)) {
2177  parse_avid(s, cbuf, len);
2178  } else if (!strcmp(cbuf, "CS=ITU601"))
2179  s->cs_itu601 = 1;
 /* These encoders store the image bottom-up; mark it for flipping later. */
2180  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2181  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2182  s->flipped = 1;
 /* MULTISCOPE II frames are horizontally squeezed 2:1. */
2183  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2184  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2185  s->multiscope = 2;
2186  }
2187 
2188  av_free(cbuf);
2189 
2190  return 0;
2191 }
2192 
2193 /* return the 8 bit start code value and update the search
2194  state. Return -1 if no start code found */
2195 int ff_mjpeg_find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2196 {
2197  const uint8_t *buf_ptr;
2198  int val;
2199 
2200  buf_ptr = *pbuf_ptr;
2201  while ((buf_ptr = memchr(buf_ptr, 0xff, buf_end - buf_ptr))) {
2202  buf_ptr++;
2203  while (buf_ptr < buf_end) {
2204  val = *buf_ptr++;
2205  if (val != 0xff) {
2206  if ((val >= SOF0) && (val <= COM))
2207  goto found;
2208  break;
2209  }
2210  }
2211  }
2212  buf_ptr = buf_end;
2213  val = -1;
2214 found:
2215  ff_dlog(NULL, "find_marker skipped %td bytes\n",
2216  (buf_ptr - *pbuf_ptr) - (val < 0 ? 0 : 2));
2217  *pbuf_ptr = buf_ptr;
2218  return val;
2219 }
2220 
/* Measure the raw (still escaped) entropy-coded scan data for hardware
 * decoders: find its extent up to the next non-restart marker, report the
 * start pointer and byte length, and skip the GetByteContext past it.
 * NOTE(review): the first line of the signature was lost in extraction;
 * the forward declaration near the top of the file shows the parameter
 * list (MJpegDecodeContext *s, const uint8_t **pbuf_ptr,
 * size_t *pbuf_size) — confirm the function name upstream. */
2222  const uint8_t **pbuf_ptr, size_t *pbuf_size)
2223 {
2224  const uint8_t *buf_ptr = s->gB.buffer;
2225  const uint8_t *buf_end = buf_ptr + bytestream2_get_bytes_left(&s->gB);
2226 
2227  /* Find size of image data buffer (including restart markers).
2228  * No unescaping is performed. */
2229  const uint8_t *ptr = buf_ptr;
2230  while ((ptr = memchr(ptr, 0xff, buf_end - ptr))) {
2231  ptr++;
2232  if (ptr < buf_end) {
2233  uint8_t x = *ptr++;
2234  /* Discard multiple optional 0xFF fill bytes. */
2235  while (x == 0xff && ptr < buf_end)
2236  x = *ptr++;
 /* 0x00 after 0xFF is a stuffed byte and RST0..RST7 are restart
  * markers; both belong to the scan. Anything else ends it. */
2237  if (x && (x < RST0 || x > RST7)) {
2238  /* Non-restart marker */
2239  ptr -= 2;
2240  goto found_hw;
2241  }
2242  }
2243  }
2244  ptr = buf_end;
2245 found_hw:
2246  *pbuf_ptr = buf_ptr;
2247  *pbuf_size = ptr - buf_ptr;
2248  bytestream2_skipu(&s->gB, *pbuf_size);
2249 }
2250 
2252 {
2253  const uint8_t *buf_ptr = s->gB.buffer;
2254  const uint8_t *buf_end = buf_ptr + bytestream2_get_bytes_left(&s->gB);
2255  const uint8_t *unescaped_buf_ptr;
2256  size_t unescaped_buf_size;
2257 
2258  if (s->avctx->codec_id == AV_CODEC_ID_MEDIA100 ||
2259  s->avctx->codec_id == AV_CODEC_ID_MJPEGB ||
2260  s->avctx->codec_id == AV_CODEC_ID_THP) {
2261  /* The image data buffer is already unescaped. The only way to
2262  * find the size of the buffer is by fully decoding it. */
2263  unescaped_buf_ptr = buf_ptr;
2264  unescaped_buf_size = buf_end - buf_ptr;
2265  goto the_end;
2266  }
2267 
2268  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - buf_ptr);
2269  if (!s->buffer)
2270  return AVERROR(ENOMEM);
2271 
2272  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2273  if (!s->ls) {
2274  const uint8_t *src = buf_ptr;
2275  const uint8_t *ptr = src;
2276  uint8_t *dst = s->buffer;
2277  PutByteContext pb;
2278 
2279  bytestream2_init_writer(&pb, dst, buf_end - src);
2280 
2281  while ((ptr = memchr(ptr, 0xff, buf_end - ptr))) {
2282  ptr++;
2283  if (ptr < buf_end) {
2284  /* Copy verbatim data. */
2285  ptrdiff_t length = (ptr - 1) - src;
2286  if (length > 0)
2287  bytestream2_put_bufferu(&pb, src, length);
2288 
2289  uint8_t x = *ptr++;
2290  /* Discard multiple optional 0xFF fill bytes. */
2291  while (x == 0xff && ptr < buf_end)
2292  x = *ptr++;
2293 
2294  src = ptr;
2295  if (x == 0) {
2296  /* Stuffed zero byte */
2297  bytestream2_put_byteu(&pb, 0xff);
2298  } else if (x >= RST0 && x <= RST7) {
2299  /* Restart marker */
2300  goto found;
2301  } else {
2302  /* Non-restart marker */
2303  ptr -= 2;
2304  goto found;
2305  }
2306  }
2307  }
2308  /* Copy remaining verbatim data. */
2309  ptr = buf_end;
2310  ptrdiff_t length = ptr - src;
2311  if (length > 0)
2312  bytestream2_put_bufferu(&pb, src, length);
2313 
2314 found:
2315  unescaped_buf_ptr = s->buffer;
2316  unescaped_buf_size = bytestream2_tell_p(&pb);
2317  memset(s->buffer + unescaped_buf_size, 0,
2319 
2320  bytestream2_skipu(&s->gB, ptr - buf_ptr);
2321 
2322  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %td bytes\n",
2323  (buf_end - buf_ptr) - (unescaped_buf_size));
2324  } else {
2325  const uint8_t *src = buf_ptr;
2326  const uint8_t *ptr = src;
2327  uint8_t *dst = s->buffer;
2328  PutBitContext pb;
2329 
2330  init_put_bits(&pb, dst, buf_end - src);
2331 
2332  while ((ptr = memchr(ptr, 0xff, buf_end - ptr))) {
2333  ptr++;
2334  if (ptr < buf_end) {
2335  /* Copy verbatim data. */
2336  ptrdiff_t length = (ptr - 1) - src;
2337  if (length > 0)
2338  ff_copy_bits(&pb, src, length * 8);
2339 
2340  uint8_t x = *ptr++;
2341  /* Discard multiple optional 0xFF fill bytes. */
2342  while (x == 0xff && ptr < buf_end)
2343  x = *ptr++;
2344 
2345  src = ptr;
2346  if (!(x & 0x80)) {
2347  /* Stuffed zero bit */
2348  put_bits(&pb, 15, 0x7f80 | x);
2349  } else if (x >= RST0 && x <= RST7) {
2350  /* Restart marker */
2351  goto found_ls;
2352  } else {
2353  /* Non-restart marker */
2354  ptr -= 2;
2355  goto found_ls;
2356  }
2357  }
2358  }
2359  /* Copy remaining verbatim data. */
2360  ptr = buf_end;
2361  ptrdiff_t length = ptr - src;
2362  if (length > 0)
2363  ff_copy_bits(&pb, src, length * 8);
2364 
2365 found_ls:
2366  flush_put_bits(&pb);
2367 
2368  unescaped_buf_ptr = dst;
2369  unescaped_buf_size = put_bytes_output(&pb);
2370  memset(s->buffer + unescaped_buf_size, 0,
2372 
2373  bytestream2_skipu(&s->gB, ptr - buf_ptr);
2374  }
2375 
2376 the_end:
2377  return init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2378 }
2379 
/* Free every accumulated APP2 ICC-profile chunk and reset the read/total
 * counters so a new profile can be collected.
 * NOTE(review): signature line lost in extraction; presumably
 * static void reset_icc_profile(MJpegDecodeContext *s) — confirm upstream. */
2381 {
2382  int i;
2383 
2384  if (s->iccentries) {
2385  for (i = 0; i < s->iccnum; i++)
2386  av_freep(&s->iccentries[i].data);
2387  av_freep(&s->iccentries);
2388  }
2389 
2390  s->iccread = 0;
2391  s->iccnum = 0;
2392 }
2393 
2395  int *got_frame, const AVPacket *avpkt,
2396  const uint8_t *buf, const int buf_size)
2397 {
2398  MJpegDecodeContext *s = avctx->priv_data;
2399  const uint8_t *buf_end, *buf_ptr;
2400  int hshift, vshift;
2401  int start_code;
2402  int index;
2403  int ret = 0;
2404  int is16bit;
2405 
2406  s->force_pal8 = 0;
2407 
2408  s->buf_size = buf_size;
2409 
2410  av_exif_free(&s->exif_metadata);
2411  av_freep(&s->stereo3d);
2412  s->adobe_transform = -1;
2413 
2414  if (s->iccnum != 0)
2416 
2417 redo_for_pal8:
2418  buf_ptr = buf;
2419  buf_end = buf + buf_size;
2420  while (buf_ptr < buf_end) {
2421  /* find start next marker */
2422  start_code = ff_mjpeg_find_marker(&buf_ptr, buf_end);
2423  /* EOF */
2424  if (start_code < 0)
2425  break;
2426 
2427  ptrdiff_t bytes_left = buf_end - buf_ptr;
2428  if (bytes_left > INT_MAX / 8) {
2429  av_log(avctx, AV_LOG_ERROR,
2430  "MJPEG packet 0x%x too big (%td/%d), corrupt data?\n",
2431  start_code, bytes_left, buf_size);
2432  return AVERROR_INVALIDDATA;
2433  }
2434  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%td\n",
2435  start_code, buf_end - buf_ptr);
2436 
2437  bytestream2_init(&s->gB, buf_ptr, bytes_left);
2438 
2439  if (avctx->debug & FF_DEBUG_STARTCODE)
2440  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2441 
2442  /* process markers */
2443  if (start_code >= RST0 && start_code <= RST7) {
2444  av_log(avctx, AV_LOG_DEBUG,
2445  "restart marker: %d\n", start_code & 0x0f);
2446  /* APP fields */
2447  } else if (start_code >= APP0 && start_code <= APP15) {
2448  if ((ret = mjpeg_decode_app(s, start_code)) < 0)
2449  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2450  av_err2str(ret));
2451  /* Comment */
2452  } else if (start_code == COM) {
2453  ret = mjpeg_decode_com(s);
2454  if (ret < 0)
2455  return ret;
2456  } else if (start_code == DQT) {
2458  if (ret < 0)
2459  return ret;
2460  }
2461 
2462  ret = -1;
2463 
2464  if (!CONFIG_JPEGLS_DECODER &&
2465  (start_code == SOF55 || start_code == LSE)) {
2466  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2467  return AVERROR(ENOSYS);
2468  }
2469 
2470  if (avctx->skip_frame == AVDISCARD_ALL) {
2471  switch (start_code) {
2472  case SOF0:
2473  case SOF1:
2474  case SOF2:
2475  case SOF3:
2476  case SOF55:
2477  break;
2478  default:
2479  goto skip;
2480  }
2481  }
2482 
2483  switch (start_code) {
2484  case SOI:
2485  s->restart_interval = 0;
2486  s->raw_image_buffer = buf_ptr;
2487  s->raw_image_buffer_size = buf_end - buf_ptr;
2488  /* nothing to do on SOI */
2489  break;
2490  case DHT:
2491  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2492  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2493  goto fail;
2494  }
2495  break;
2496  case SOF0:
2497  case SOF1:
2498  if (start_code == SOF0)
2500  else
2502  s->lossless = 0;
2503  s->ls = 0;
2504  s->progressive = 0;
2505  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2506  goto fail;
2507  break;
2508  case SOF2:
2510  s->lossless = 0;
2511  s->ls = 0;
2512  s->progressive = 1;
2513  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2514  goto fail;
2515  break;
2516  case SOF3:
2518 #if FF_API_CODEC_PROPS
2522 #endif
2523  s->lossless = 1;
2524  s->ls = 0;
2525  s->progressive = 0;
2526  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2527  goto fail;
2528  break;
2529  case SOF55:
2531 #if FF_API_CODEC_PROPS
2535 #endif
2536  s->lossless = 1;
2537  s->ls = 1;
2538  s->progressive = 0;
2539  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2540  goto fail;
2541  break;
2542  case LSE:
2543  if (!CONFIG_JPEGLS_DECODER ||
2544  (ret = ff_jpegls_decode_lse(s)) < 0)
2545  goto fail;
2546  if (ret == 1)
2547  goto redo_for_pal8;
2548  break;
2549  case EOI:
2550 eoi_parser:
2551  if (!avctx->hwaccel &&
2552  s->progressive && s->cur_scan && s->got_picture)
2554  s->cur_scan = 0;
2555  if (!s->got_picture) {
2556  av_log(avctx, AV_LOG_WARNING,
2557  "Found EOI before any SOF, ignoring\n");
2558  break;
2559  }
2560  if (s->interlaced) {
2561  s->bottom_field ^= 1;
2562  /* if not bottom field, do not output image yet */
2563  if (s->bottom_field == !s->interlace_polarity)
2564  break;
2565  }
2566  if (avctx->hwaccel) {
2567  ret = FF_HW_SIMPLE_CALL(avctx, end_frame);
2568  if (ret < 0)
2569  return ret;
2570 
2571  av_freep(&s->hwaccel_picture_private);
2572  }
2573  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2574  return ret;
2575  if (s->lossless)
2576  frame->flags |= AV_FRAME_FLAG_LOSSLESS;
2577  *got_frame = 1;
2578  s->got_picture = 0;
2579 
2580  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2581  int qp = FFMAX3(s->qscale[0],
2582  s->qscale[1],
2583  s->qscale[2]);
2584 
2585  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2586  }
2587 
2588  goto the_end;
2589  case SOS:
2590  s->cur_scan++;
2591 
2592  if ((ret = ff_mjpeg_decode_sos(s)) < 0 &&
2593  (avctx->err_recognition & AV_EF_EXPLODE))
2594  goto fail;
2595  break;
2596  case DRI:
2597  if ((ret = mjpeg_decode_dri(s)) < 0)
2598  return ret;
2599  break;
2600  case SOF5:
2601  case SOF6:
2602  case SOF7:
2603  case SOF9:
2604  case SOF10:
2605  case SOF11:
2606  case SOF13:
2607  case SOF14:
2608  case SOF15:
2609  case JPG:
2610  av_log(avctx, AV_LOG_ERROR,
2611  "mjpeg: unsupported coding type (%x)\n", start_code);
2612  break;
2613  }
2614 
2615  if (avctx->skip_frame == AVDISCARD_ALL) {
2616  switch (start_code) {
2617  case SOF0:
2618  case SOF1:
2619  case SOF2:
2620  case SOF3:
2621  case SOF55:
2622  s->got_picture = 0;
2623  goto the_end_no_picture;
2624  }
2625  }
2626 
2627 skip:
2628  /* eof process start code */
2629  buf_ptr += bytestream2_tell(&s->gB);
2630  av_log(avctx, AV_LOG_DEBUG,
2631  "marker parser used %d bytes\n",
2632  bytestream2_tell(&s->gB));
2633  }
2634  if (s->got_picture && s->cur_scan) {
2635  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2636  goto eoi_parser;
2637  }
2638  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2639  return AVERROR_INVALIDDATA;
2640 fail:
2641  s->got_picture = 0;
2642  return ret;
2643 the_end:
2644 
2645  is16bit = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].step > 1;
2646 
2647  if (AV_RB32(s->upscale_h)) {
2648  int p;
2650  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2651  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2652  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2653  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2654  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2655  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2656  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2657  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2658  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2659  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2660  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2661  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2662  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2663  );
2664  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2665  if (ret)
2666  return ret;
2667 
2668  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2669  for (p = 0; p < s->nb_components; p++) {
2670  uint8_t *line = s->picture_ptr->data[p];
2671  int w = s->width;
2672  int h = s->height;
2673  if (!s->upscale_h[p])
2674  continue;
2675  if (p == 1 || p == 2) {
2676  w = AV_CEIL_RSHIFT(w, hshift);
2677  h = AV_CEIL_RSHIFT(h, vshift);
2678  }
2679  if (s->upscale_v[p] == 1)
2680  h = (h + 1) >> 1;
2681  av_assert0(w > 0);
2682  for (int i = 0; i < h; i++) {
2683  if (s->upscale_h[p] == 1) {
2684  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2685  else line[w - 1] = line[(w - 1) / 2];
2686  for (index = w - 2; index > 0; index--) {
2687  if (is16bit)
2688  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2689  else
2690  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2691  }
2692  } else if (s->upscale_h[p] == 2) {
2693  if (is16bit) {
2694  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2695  if (w > 1)
2696  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2697  } else {
2698  line[w - 1] = line[(w - 1) / 3];
2699  if (w > 1)
2700  line[w - 2] = line[w - 1];
2701  }
2702  for (index = w - 3; index > 0; index--) {
2703  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2704  }
2705  } else if (s->upscale_h[p] == 4) {
2706  if (is16bit) {
2707  uint16_t *line16 = (uint16_t *) line;
2708  line16[w - 1] = line16[(w - 1) >> 2];
2709  if (w > 1)
2710  line16[w - 2] = (line16[(w - 1) >> 2] * 3 + line16[(w - 2) >> 2]) >> 2;
2711  if (w > 2)
2712  line16[w - 3] = (line16[(w - 1) >> 2] + line16[(w - 2) >> 2]) >> 1;
2713  } else {
2714  line[w - 1] = line[(w - 1) >> 2];
2715  if (w > 1)
2716  line[w - 2] = (line[(w - 1) >> 2] * 3 + line[(w - 2) >> 2]) >> 2;
2717  if (w > 2)
2718  line[w - 3] = (line[(w - 1) >> 2] + line[(w - 2) >> 2]) >> 1;
2719  }
2720  for (index = w - 4; index > 0; index--)
2721  line[index] = (line[(index + 3) >> 2] + line[(index + 2) >> 2]
2722  + line[(index + 1) >> 2] + line[index >> 2]) >> 2;
2723  }
2724  line += s->linesize[p];
2725  }
2726  }
2727  }
2728  if (AV_RB32(s->upscale_v)) {
2729  int p;
2731  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2732  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2733  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2734  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2735  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2736  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2737  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2738  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2739  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2740  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2741  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2742  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2743  );
2744  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2745  if (ret)
2746  return ret;
2747 
2748  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2749  for (p = 0; p < s->nb_components; p++) {
2750  uint8_t *dst;
2751  int w = s->width;
2752  int h = s->height;
2753  if (!s->upscale_v[p])
2754  continue;
2755  if (p == 1 || p == 2) {
2756  w = AV_CEIL_RSHIFT(w, hshift);
2757  h = AV_CEIL_RSHIFT(h, vshift);
2758  }
2759  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2760  for (int i = h - 1; i; i--) {
2761  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2762  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2763  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2764  memcpy(dst, src1, w);
2765  } else {
2766  for (index = 0; index < w; index++)
2767  dst[index] = (src1[index] + src2[index]) >> 1;
2768  }
2769  dst -= s->linesize[p];
2770  }
2771  }
2772  }
2773  if (s->flipped && !s->rgb) {
2774  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2775  if (ret)
2776  return ret;
2777 
2778  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
2779  for (index = 0; index < s->nb_components; index++) {
2780  int h = frame->height;
2781  if (index && index < 3)
2782  h = AV_CEIL_RSHIFT(h, vshift);
2783  if (frame->data[index]) {
2784  frame->data[index] += (h - 1) * frame->linesize[index];
2785  frame->linesize[index] *= -1;
2786  }
2787  }
2788  }
2789 
2790  if (avctx->pix_fmt == AV_PIX_FMT_GBRP) {
2791  av_assert0(s->nb_components == 3);
2792  FFSWAP(uint8_t *, frame->data[0], frame->data[2]);
2793  FFSWAP(uint8_t *, frame->data[0], frame->data[1]);
2794  FFSWAP(int, frame->linesize[0], frame->linesize[2]);
2795  FFSWAP(int, frame->linesize[0], frame->linesize[1]);
2796  }
2797 
2798  if (s->adobe_transform == 0 && avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2799  int w = s->picture_ptr->width;
2800  int h = s->picture_ptr->height;
2801  av_assert0(s->nb_components == 4);
2802  for (int i = 0; i < h; i++) {
2803  int j;
2804  uint8_t *dst[4];
2805  for (index = 0; index < 4; index++) {
2806  dst[index] = s->picture_ptr->data[index]
2807  + s->picture_ptr->linesize[index]*i;
2808  }
2809  for (j = 0; j < w; j++) {
2810  int k = dst[3][j];
2811  int r = dst[0][j] * k;
2812  int g = dst[1][j] * k;
2813  int b = dst[2][j] * k;
2814  dst[0][j] = g * 257 >> 16;
2815  dst[1][j] = b * 257 >> 16;
2816  dst[2][j] = r * 257 >> 16;
2817  }
2818  memset(dst[3], 255, w);
2819  }
2820  }
2821  if (s->adobe_transform == 2 && avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2822  int w = s->picture_ptr->width;
2823  int h = s->picture_ptr->height;
2824  av_assert0(s->nb_components == 4);
2825  for (int i = 0; i < h; i++) {
2826  int j;
2827  uint8_t *dst[4];
2828  for (index = 0; index < 4; index++) {
2829  dst[index] = s->picture_ptr->data[index]
2830  + s->picture_ptr->linesize[index]*i;
2831  }
2832  for (j = 0; j < w; j++) {
2833  int k = dst[3][j];
2834  int r = (255 - dst[0][j]) * k;
2835  int g = (128 - dst[1][j]) * k;
2836  int b = (128 - dst[2][j]) * k;
2837  dst[0][j] = r * 257 >> 16;
2838  dst[1][j] = (g * 257 >> 16) + 128;
2839  dst[2][j] = (b * 257 >> 16) + 128;
2840  }
2841  memset(dst[3], 255, w);
2842  }
2843  }
2844 
2845  if (s->stereo3d) {
2847  if (stereo) {
2848  stereo->type = s->stereo3d->type;
2849  stereo->flags = s->stereo3d->flags;
2850  }
2851  av_freep(&s->stereo3d);
2852  }
2853 
2854  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2855  AVFrameSideData *sd;
2856  size_t offset = 0;
2857  int total_size = 0;
2858 
2859  /* Sum size of all parts. */
2860  for (int i = 0; i < s->iccnum; i++)
2861  total_size += s->iccentries[i].length;
2862 
2863  ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE, total_size, &sd);
2864  if (ret < 0) {
2865  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2866  return ret;
2867  }
2868 
2869  if (sd) {
2870  /* Reassemble the parts, which are now in-order. */
2871  for (int i = 0; i < s->iccnum; i++) {
2872  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2873  offset += s->iccentries[i].length;
2874  }
2875  }
2876  }
2877 
2878  if (s->exif_metadata.entries) {
2879  ret = ff_decode_exif_attach_ifd(avctx, frame, &s->exif_metadata);
2880  av_exif_free(&s->exif_metadata);
2881  if (ret < 0)
2882  av_log(avctx, AV_LOG_WARNING, "couldn't attach EXIF metadata\n");
2883  }
2884 
2885  if (avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
2886  (avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2887  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2888  avctx->coded_height > s->orig_height) {
2889  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2890  frame->crop_top = frame->height - avctx->height;
2891  }
2892 
2893 the_end_no_picture:
2894  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %td bytes\n",
2895  buf_end - buf_ptr);
2896  return buf_ptr - buf;
2897 }
2898 
2899 int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame,
2900  AVPacket *avpkt)
2901 {
2902  return ff_mjpeg_decode_frame_from_buf(avctx, frame, got_frame,
2903  avpkt, avpkt->data, avpkt->size);
2904 }
2905 
2906 
2907 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2908  * even without having called ff_mjpeg_decode_init(). */
2910 {
2911  MJpegDecodeContext *s = avctx->priv_data;
2912  int i, j;
2913 
2914  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
2915  av_log(avctx, AV_LOG_INFO, "Single field\n");
2916  }
2917 
2918  av_frame_free(&s->picture);
2919  s->picture_ptr = NULL;
2920 
2921  av_frame_free(&s->smv_frame);
2922 
2923  av_freep(&s->buffer);
2924  av_freep(&s->stereo3d);
2925  av_freep(&s->ljpeg_buffer);
2926  s->ljpeg_buffer_size = 0;
2927 
2928  for (i = 0; i < 3; i++) {
2929  for (j = 0; j < 4; j++)
2930  ff_vlc_free(&s->vlcs[i][j]);
2931  }
2932  for (i = 0; i < MAX_COMPONENTS; i++) {
2933  av_freep(&s->blocks[i]);
2934  av_freep(&s->last_nnz[i]);
2935  }
2936  av_exif_free(&s->exif_metadata);
2937 
2939 
2940  av_freep(&s->hwaccel_picture_private);
2941  av_freep(&s->jls_state);
2942 
2943  return 0;
2944 }
2945 
/* Codec flush callback: drop any partially decoded picture state and
 * rewind the SMV sub-frame cursor, releasing the cached SMV picture.
 * NOTE(review): signature line lost in extraction; presumably
 * static void decode_flush(AVCodecContext *avctx) — confirm upstream. */
2947 {
2948  MJpegDecodeContext *s = avctx->priv_data;
2949  s->got_picture = 0;
2950 
2951  s->smv_next_frame = 0;
2952  av_frame_unref(s->smv_frame);
2953 }
2954 
2955 #if CONFIG_MJPEG_DECODER
2956 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2957 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2958 static const AVOption options[] = {
2959  { "extern_huff", "Use external huffman table.",
2960  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2961  { NULL },
2962 };
2963 
2964 static const AVClass mjpegdec_class = {
2965  .class_name = "MJPEG decoder",
2966  .item_name = av_default_item_name,
2967  .option = options,
2968  .version = LIBAVUTIL_VERSION_INT,
2969 };
2970 
2971 const FFCodec ff_mjpeg_decoder = {
2972  .p.name = "mjpeg",
2973  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
2974  .p.type = AVMEDIA_TYPE_VIDEO,
2975  .p.id = AV_CODEC_ID_MJPEG,
2976  .priv_data_size = sizeof(MJpegDecodeContext),
2980  .flush = decode_flush,
2981  .p.capabilities = AV_CODEC_CAP_DR1,
2982  .p.max_lowres = 3,
2983  .p.priv_class = &mjpegdec_class,
2984  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
2985  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
2988  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2989 #if CONFIG_MJPEG_NVDEC_HWACCEL
2990  HWACCEL_NVDEC(mjpeg),
2991 #endif
2992 #if CONFIG_MJPEG_VAAPI_HWACCEL
2993  HWACCEL_VAAPI(mjpeg),
2994 #endif
2995  NULL
2996  },
2997 };
2998 #endif
2999 #if CONFIG_THP_DECODER
3000 const FFCodec ff_thp_decoder = {
3001  .p.name = "thp",
3002  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
3003  .p.type = AVMEDIA_TYPE_VIDEO,
3004  .p.id = AV_CODEC_ID_THP,
3005  .priv_data_size = sizeof(MJpegDecodeContext),
3009  .flush = decode_flush,
3010  .p.capabilities = AV_CODEC_CAP_DR1,
3011  .p.max_lowres = 3,
3012  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
3013 };
3014 #endif
3015 
3016 #if CONFIG_SMVJPEG_DECODER
3017 // SMV JPEG just stacks several output frames into one JPEG picture
3018 // we handle that by setting up the cropping parameters appropriately
/* Select the current sub-frame of the stacked SMV picture by setting crop
 * parameters on the full coded-size output frame, then advance the
 * per-JPEG sub-frame cursor and timestamp for the next call. */
3019 static void smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
3020 {
3021  MJpegDecodeContext *s = avctx->priv_data;
3022 
3023  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
3024 
 /* Expose the whole coded picture, cropped down to sub-frame
  * number smv_next_frame (each sub-frame is avctx->height tall). */
3025  frame->width = avctx->coded_width;
3026  frame->height = avctx->coded_height;
3027  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
3028  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
3029 
 /* Each sub-frame advances the pts by one (already per-sub-frame)
  * duration; see the duration division in smvjpeg_receive_frame(). */
3030  if (s->smv_frame->pts != AV_NOPTS_VALUE)
3031  s->smv_frame->pts += s->smv_frame->duration;
3032  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
3033 
 /* All sub-frames of this JPEG consumed: release the cached picture. */
3034  if (s->smv_next_frame == 0)
3035  av_frame_unref(s->smv_frame);
3036 }
3037 
3038 static int smvjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
3039 {
3040  MJpegDecodeContext *s = avctx->priv_data;
3041  AVPacket *const pkt = avctx->internal->in_pkt;
3042  int got_frame = 0;
3043  int ret;
3044 
3045  if (s->smv_next_frame > 0)
3046  goto return_frame;
3047 
3048  ret = ff_decode_get_packet(avctx, pkt);
3049  if (ret < 0)
3050  return ret;
3051 
3052  av_frame_unref(s->smv_frame);
3053 
3054  ret = ff_mjpeg_decode_frame(avctx, s->smv_frame, &got_frame, pkt);
3055  s->smv_frame->pkt_dts = pkt->dts;
3057  if (ret < 0)
3058  return ret;
3059 
3060  if (!got_frame)
3061  return AVERROR(EAGAIN);
3062 
3063  // packet duration covers all the frames in the packet
3064  s->smv_frame->duration /= s->smv_frames_per_jpeg;
3065 
3066 return_frame:
3067  av_assert0(s->smv_frame->buf[0]);
3068  ret = av_frame_ref(frame, s->smv_frame);
3069  if (ret < 0)
3070  return ret;
3071 
3072  smv_process_frame(avctx, frame);
3073  return 0;
3074 }
3075 
3076 const FFCodec ff_smvjpeg_decoder = {
3077  .p.name = "smvjpeg",
3078  CODEC_LONG_NAME("SMV JPEG"),
3079  .p.type = AVMEDIA_TYPE_VIDEO,
3080  .p.id = AV_CODEC_ID_SMVJPEG,
3081  .priv_data_size = sizeof(MJpegDecodeContext),
3084  FF_CODEC_RECEIVE_FRAME_CB(smvjpeg_receive_frame),
3085  .flush = decode_flush,
3086  .p.capabilities = AV_CODEC_CAP_DR1,
3087  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3089 };
3090 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
flags
const SwsFlags flags[]
Definition: swscale.c:61
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1413
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:249
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:208
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:360
opt.h
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
av_exif_parse_buffer
int av_exif_parse_buffer(void *logctx, const uint8_t *buf, size_t size, AVExifMetadata *ifd, enum AVExifHeaderMode header_mode)
Decodes the EXIF data provided in the buffer and writes it into the struct *ifd.
Definition: exif.c:881
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:667
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1208
out
static FILE * out
Definition: movenc.c:55
put_bytes_output
static int put_bytes_output(const PutBitContext *s)
Definition: put_bits.h:99
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1412
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
APP1
@ APP1
Definition: mjpeg.h:80
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:997
SOF0
@ SOF0
Definition: mjpeg.h:39
src1
const pixel * src1
Definition: h264pred_template.c:420
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1406
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:573
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:254
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:114
mask
int mask
Definition: mediacodecdec_common.c:154
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:230
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:154
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:595
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:588
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:690
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:42
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s)
Definition: mjpegdec.c:1081
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:149
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:127
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
AV_CODEC_ID_MEDIA100
@ AV_CODEC_ID_MEDIA100
Definition: codec_id.h:322
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:213
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:152
AV_CODEC_ID_MXPEG
@ AV_CODEC_ID_MXPEG
Definition: codec_id.h:198
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:517
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1383
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:655
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:251
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:197
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3496
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
rgb
Definition: rpzaenc.c:60
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:248
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1428
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
FFHWAccel
Definition: hwaccel_internal.h:34
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:448
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:123
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1670
fail
#define fail()
Definition: checkasm.h:218
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:597
SOF3
@ SOF3
Definition: mjpeg.h:42
GetBitContext
Definition: get_bits.h:109
ff_mjpeg_decode_frame_from_buf
int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *avpkt, const uint8_t *buf, const int buf_size)
Definition: mjpegdec.c:2394
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2151
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:61
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2195
av_exif_free
void av_exif_free(AVExifMetadata *ifd)
Frees all resources associated with the given EXIF metadata struct.
Definition: exif.c:658
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1600
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:615
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:522
ff_mjpeg_handle_restart
static int ff_mjpeg_handle_restart(MJpegDecodeContext *s, int *restart)
Definition: mjpegdec.h:214
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:283
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s)
Definition: mjpegdec.c:1690
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:191
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:900
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:551
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1646
AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: defs.h:173
COM
@ COM
Definition: mjpeg.h:111
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: defs.h:212
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:189
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:523
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
ff_mjpeg_should_restart
static int ff_mjpeg_should_restart(MJpegDecodeContext *s)
Definition: mjpegdec.h:196
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:104
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:552
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
bytestream2_tell_p
static av_always_inline int bytestream2_tell_p(const PutByteContext *p)
Definition: bytestream.h:197
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:550
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2380
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2909
mjpeg_find_raw_scan_data
static void mjpeg_find_raw_scan_data(MJpegDecodeContext *s, const uint8_t **pbuf_ptr, size_t *pbuf_size)
Definition: mjpegdec.c:2221
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
SOF55
@ SOF55
JPEG-LS.
Definition: mjpeg.h:103
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:212
mjpeg_parse_len
static int mjpeg_parse_len(MJpegDecodeContext *s, int *plen, const char *name)
Definition: mjpegdec.c:193
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:561
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:529
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_decode_exif_attach_ifd
int ff_decode_exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd)
Definition: decode.c:2430
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
av_clip_int16
#define av_clip_int16
Definition: common.h:115
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:530
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1655
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:207
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
hwaccel_internal.h
AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: defs.h:174
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:205
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
Definition: mjpegdec.c:836
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int Ss, int Se, int Al, int *EOBRUN)
Definition: mjpegdec.c:917
options
Definition: swscale.c:43
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:235
MJpegDecodeContext
Definition: mjpegdec.h:56
lowres
static int lowres
Definition: ffplay.c:332
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:651
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s, int start_code)
Definition: mjpegdec.c:1846
AV_CODEC_ID_MJPEGB
@ AV_CODEC_ID_MJPEGB
Definition: codec_id.h:60
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
PutByteContext
Definition: bytestream.h:37
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1705
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:506
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1729
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
AVPacket::size
int size
Definition: packet.h:589
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2899
av_bswap32
#define av_bswap32
Definition: bswap.h:47
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
AV_PROFILE_MJPEG_JPEG_LS
#define AV_PROFILE_MJPEG_JPEG_LS
Definition: defs.h:177
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:268
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2127
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
SOF15
@ SOF15
Definition: mjpeg.h:54
AVCodecHWConfigInternal
Definition: hwconfig.h:25
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:294
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:194
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:180
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2334
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:851
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:522
AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: defs.h:176
VD
#define VD
Definition: amfdec.c:664
src2
const pixel * src2
Definition: h264pred_template.c:421
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s)
Definition: jpeglsdec.c:355
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: defs.h:215
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s)
Definition: mjpegdec.c:1443
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1835
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:83
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
ff_mjpeg_unescape_sos
int ff_mjpeg_unescape_sos(MJpegDecodeContext *s)
Definition: mjpegdec.c:2251
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:63
decode_flush
static av_cold void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2946
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1390
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:107
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:684
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1886
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:989
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int Ss, int Se, int Al, int *EOBRUN)
Definition: mjpegdec.c:1015
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:35
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
av_malloc
void * av_malloc(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:98
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:560
hwaccel
static const char * hwaccel
Definition: ffplay.c:356
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1387
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
id
enum AVCodecID id
Definition: dts2pts.c:549
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
AVCodecContext
main external API structure.
Definition: avcodec.h:439
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:355
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:247
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:799
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
APP2
@ APP2
Definition: mjpeg.h:81
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:173
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1626
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
values
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
Definition: filter_design.txt:264
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1382
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:307
APP0
@ APP0
Definition: mjpeg.h:79
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:615
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
bytestream2_get_bufferu
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:277
SOI
@ SOI
Definition: mjpeg.h:70
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:54
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
SOF1
@ SOF1
Definition: mjpeg.h:40
w
uint8_t w
Definition: llvidencdsp.c:39
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:464
bytestream2_put_bufferu
static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p, const uint8_t *src, unsigned int size)
Definition: bytestream.h:301
ff_mjpeg_bits_dc_luminance
const FF_VISIBILITY_PUSH_HIDDEN uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1645
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:47
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s)
Definition: mjpegdec.c:1251
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:203
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
width
#define width
Definition: dsp.h:89
AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: defs.h:175
AV_RB24
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
return_frame
static int return_frame(AVFilterContext *ctx, int is_second)
Definition: yadif_common.c:28
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:663
SOF6
@ SOF6
Definition: mjpeg.h:45
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:383
src
#define src
Definition: vp8dsp.c:248
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:347