FFmpeg
mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/attributes.h"
36 #include "libavutil/emms.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/avassert.h"
39 #include "libavutil/mem.h"
40 #include "libavutil/opt.h"
41 #include "avcodec.h"
42 #include "blockdsp.h"
43 #include "codec_internal.h"
44 #include "copy_block.h"
45 #include "decode.h"
46 #include "exif.h"
47 #include "hwaccel_internal.h"
48 #include "hwconfig.h"
49 #include "idctdsp.h"
50 #include "internal.h"
51 #include "jpegtables.h"
52 #include "mjpeg.h"
53 #include "mjpegdec.h"
54 #include "jpeglsdec.h"
55 #include "profiles.h"
56 #include "put_bits.h"
57 
58 
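/* The table below installs the default Huffman tables (the ones from
 * ITU-T T.81 Annex K): class 0 holds the DC tables, class 1 the AC tables,
 * and class 2 repeats the AC tables as the set used by the progressive
 * decoder (s->vlcs[2]). */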
59 static int init_default_huffman_tables(MJpegDecodeContext *s)
60 {
61  static const struct {
62  int class;
63  int index;
64  const uint8_t *bits;
65  const uint8_t *values;
66  int length;
67  } ht[] = {
68  { 0, 0, ff_mjpeg_bits_dc_luminance,
69  ff_mjpeg_val_dc, 12 },
70  { 0, 1, ff_mjpeg_bits_dc_chrominance,
71  ff_mjpeg_val_dc, 12 },
72  { 1, 0, ff_mjpeg_bits_ac_luminance,
73  ff_mjpeg_val_ac_luminance, 251 },
74  { 1, 1, ff_mjpeg_bits_ac_chrominance,
75  ff_mjpeg_val_ac_chrominance, 251 },
76  { 2, 0, ff_mjpeg_bits_ac_luminance,
77  ff_mjpeg_val_ac_luminance, 251 },
78  { 2, 1, ff_mjpeg_bits_ac_chrominance,
79  ff_mjpeg_val_ac_chrominance, 251 },
80  };
81  int i, ret;
82 
83  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
84  ff_vlc_free(&s->vlcs[ht[i].class][ht[i].index]);
85  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
86  ht[i].bits, ht[i].values,
87  ht[i].class == 1, s->avctx);
88  if (ret < 0)
89  return ret;
90 
91  if (ht[i].class < 2) {
92  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
93  ht[i].bits + 1, 16);
94  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
95  ht[i].values, ht[i].length);
96  }
97  }
98 
99  return 0;
100 }
101 
102 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
103 {
104  s->buggy_avid = 1;
105  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
106  s->interlace_polarity = 1;
107  if (len > 14 && buf[12] == 2) /* 2 - PAL */
108  s->interlace_polarity = 0;
109  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
110  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
111 }
112 
113 static void init_idct(AVCodecContext *avctx)
114 {
115  MJpegDecodeContext *s = avctx->priv_data;
116 
117  ff_idctdsp_init(&s->idsp, avctx);
118  ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
119  s->idsp.idct_permutation);
120 }
121 
122 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
123 {
124  MJpegDecodeContext *s = avctx->priv_data;
125  int ret;
126 
127  if (!s->picture_ptr) {
128  s->picture = av_frame_alloc();
129  if (!s->picture)
130  return AVERROR(ENOMEM);
131  s->picture_ptr = s->picture;
132  }
133 
134  s->avctx = avctx;
135  ff_blockdsp_init(&s->bdsp);
136  init_idct(avctx);
137  s->buffer_size = 0;
138  s->buffer = NULL;
139  s->start_code = -1;
140  s->first_picture = 1;
141  s->got_picture = 0;
142  s->orig_height = avctx->coded_height;
143  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
144  avctx->colorspace = AVCOL_SPC_BT470BG;
145  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
146 
147  if ((ret = init_default_huffman_tables(s)) < 0)
148  return ret;
149 
150  if (s->extern_huff) {
151  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
152  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
153  return ret;
154  if (ff_mjpeg_decode_dht(s)) {
155  av_log(avctx, AV_LOG_ERROR,
156  "error using external huffman table, switching back to internal\n");
157  if ((ret = init_default_huffman_tables(s)) < 0)
158  return ret;
159  }
160  }
161  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
162  s->interlace_polarity = 1; /* bottom field first */
163  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
164  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
165  if (avctx->codec_tag == AV_RL32("MJPG"))
166  s->interlace_polarity = 1;
167  }
168 
169  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
170  if (avctx->extradata_size >= 4)
171  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
172 
173  if (s->smv_frames_per_jpeg <= 0) {
174  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
175  return AVERROR_INVALIDDATA;
176  }
177 
178  s->smv_frame = av_frame_alloc();
179  if (!s->smv_frame)
180  return AVERROR(ENOMEM);
181  } else if (avctx->extradata_size > 8
182  && AV_RL32(avctx->extradata) == 0x2C
183  && AV_RL32(avctx->extradata+4) == 0x18) {
184  parse_avid(s, avctx->extradata, avctx->extradata_size);
185  }
186 
187  if (avctx->codec->id == AV_CODEC_ID_AMV)
188  s->flipped = 1;
189 
190  return 0;
191 }
192 
193 
194 /* quantize tables */
195 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
196 {
197  int len, index, i;
198 
199  len = get_bits(&s->gb, 16) - 2;
200 
201  if (8*len > get_bits_left(&s->gb)) {
202  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
203  return AVERROR_INVALIDDATA;
204  }
205 
206  while (len >= 65) {
207  int pr = get_bits(&s->gb, 4);
208  if (pr > 1) {
209  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
210  return AVERROR_INVALIDDATA;
211  }
212  index = get_bits(&s->gb, 4);
213  if (index >= 4)
214  return -1;
215  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
216  /* read quant table */
217  for (i = 0; i < 64; i++) {
218  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
219  if (s->quant_matrixes[index][i] == 0) {
220  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
221  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
222  if (s->avctx->err_recognition & AV_EF_EXPLODE)
223  return AVERROR_INVALIDDATA;
224  }
225  }
226 
227  // XXX FIXME fine-tune, and perhaps add dc too
228  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
229  s->quant_matrixes[index][8]) >> 1;
230  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
231  index, s->qscale[index]);
232  len -= 1 + 64 * (1+pr);
233  }
234  return 0;
235 }
236 
237 /* decode huffman tables and build VLC decoders */
238 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
239 {
240  int len, index, i, class, n, v;
241  uint8_t bits_table[17];
242  uint8_t val_table[256];
243  int ret = 0;
244 
245  len = get_bits(&s->gb, 16) - 2;
246 
247  if (8*len > get_bits_left(&s->gb)) {
248  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
249  return AVERROR_INVALIDDATA;
250  }
251 
252  while (len > 0) {
253  if (len < 17)
254  return AVERROR_INVALIDDATA;
255  class = get_bits(&s->gb, 4);
256  if (class >= 2)
257  return AVERROR_INVALIDDATA;
258  index = get_bits(&s->gb, 4);
259  if (index >= 4)
260  return AVERROR_INVALIDDATA;
261  n = 0;
262  for (i = 1; i <= 16; i++) {
263  bits_table[i] = get_bits(&s->gb, 8);
264  n += bits_table[i];
265  }
266  len -= 17;
267  if (len < n || n > 256)
268  return AVERROR_INVALIDDATA;
269 
270  for (i = 0; i < n; i++) {
271  v = get_bits(&s->gb, 8);
272  val_table[i] = v;
273  }
274  len -= n;
275 
276  /* build VLC and flush previous vlc if present */
277  ff_vlc_free(&s->vlcs[class][index]);
278  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
279  class, index, n);
280  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
281  val_table, class > 0, s->avctx)) < 0)
282  return ret;
283 
284  if (class > 0) {
285  ff_vlc_free(&s->vlcs[2][index]);
286  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
287  val_table, 0, s->avctx)) < 0)
288  return ret;
289  }
290 
291  for (i = 0; i < 16; i++)
292  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
293  for (i = 0; i < 256; i++)
294  s->raw_huffman_values[class][index][i] = val_table[i];
295  }
296  return 0;
297 }
298 
299 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
300 {
301  int len, nb_components, i, width, height, bits, ret, size_change;
302  unsigned pix_fmt_id;
303  int h_count[MAX_COMPONENTS] = { 0 };
304  int v_count[MAX_COMPONENTS] = { 0 };
305 
306  s->cur_scan = 0;
307  memset(s->upscale_h, 0, sizeof(s->upscale_h));
308  memset(s->upscale_v, 0, sizeof(s->upscale_v));
309 
310  len = get_bits(&s->gb, 16);
311  bits = get_bits(&s->gb, 8);
312 
313  if (bits > 16 || bits < 1) {
314  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
315  return AVERROR_INVALIDDATA;
316  }
317 
318  if (s->avctx->bits_per_raw_sample != bits) {
319  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
320  s->avctx->bits_per_raw_sample = bits;
321  init_idct(s->avctx);
322  }
323  if (s->pegasus_rct)
324  bits = 9;
325  if (bits == 9 && !s->pegasus_rct)
326  s->rct = 1; // FIXME ugly
327 
328  if(s->lossless && s->avctx->lowres){
329  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
330  return -1;
331  }
332 
333  height = get_bits(&s->gb, 16);
334  width = get_bits(&s->gb, 16);
335 
336  // HACK for odd_height.mov
337  if (s->interlaced && s->width == width && s->height == height + 1)
338  height= s->height;
339 
340  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
341  if (av_image_check_size(width, height, 0, s->avctx) < 0)
342  return AVERROR_INVALIDDATA;
343 
344  // A valid frame requires at least 1 bit for DC + 1 bit for AC for each 8x8 block.
345  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
346  return AVERROR_INVALIDDATA;
347 
348  nb_components = get_bits(&s->gb, 8);
349  if (nb_components <= 0 ||
350  nb_components > MAX_COMPONENTS)
351  return -1;
352  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
353  if (nb_components != s->nb_components) {
354  av_log(s->avctx, AV_LOG_ERROR,
355  "nb_components changing in interlaced picture\n");
356  return AVERROR_INVALIDDATA;
357  }
358  }
359  if (s->ls && !(bits <= 8 || nb_components == 1)) {
360  avpriv_report_missing_feature(s->avctx,
361  "JPEG-LS that is not <= 8 "
362  "bits/component or 16-bit gray");
363  return AVERROR_PATCHWELCOME;
364  }
365  if (len != 8 + 3 * nb_components) {
366  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
367  return AVERROR_INVALIDDATA;
368  }
369 
370  s->nb_components = nb_components;
371  s->h_max = 1;
372  s->v_max = 1;
373  for (i = 0; i < nb_components; i++) {
374  /* component id */
375  s->component_id[i] = get_bits(&s->gb, 8);
376  h_count[i] = get_bits(&s->gb, 4);
377  v_count[i] = get_bits(&s->gb, 4);
378  /* compute hmax and vmax (only used in interleaved case) */
379  if (h_count[i] > s->h_max)
380  s->h_max = h_count[i];
381  if (v_count[i] > s->v_max)
382  s->v_max = v_count[i];
383  s->quant_index[i] = get_bits(&s->gb, 8);
384  if (s->quant_index[i] >= 4) {
385  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
386  return AVERROR_INVALIDDATA;
387  }
388  if (!h_count[i] || !v_count[i]) {
389  av_log(s->avctx, AV_LOG_ERROR,
390  "Invalid sampling factor in component %d %d:%d\n",
391  i, h_count[i], v_count[i]);
392  return AVERROR_INVALIDDATA;
393  }
394 
395  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
396  i, h_count[i], v_count[i],
397  s->component_id[i], s->quant_index[i]);
398  }
399  if ( nb_components == 4
400  && s->component_id[0] == 'C'
401  && s->component_id[1] == 'M'
402  && s->component_id[2] == 'Y'
403  && s->component_id[3] == 'K')
404  s->adobe_transform = 0;
405 
406  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
407  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
408  return AVERROR_PATCHWELCOME;
409  }
410 
411  if (s->bayer) {
412  if (nb_components == 2) {
413  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
414  width stored in their SOF3 markers is the width of each one. We only output
415  a single component, therefore we need to adjust the output image width. We
416  handle the deinterleaving (but not the debayering) in this file. */
417  width *= 2;
418  }
419  /* They can also contain 1 component, which is double the width and half the height
420  of the final image (rows are interleaved). We don't handle the decoding in this
421  file, but leave that to the TIFF/DNG decoder. */
422  }
423 
424  /* if different size, realloc/alloc picture */
425  if (width != s->width || height != s->height || bits != s->bits ||
426  memcmp(s->h_count, h_count, sizeof(h_count)) ||
427  memcmp(s->v_count, v_count, sizeof(v_count))) {
428  size_change = 1;
429 
430  s->width = width;
431  s->height = height;
432  s->bits = bits;
433  memcpy(s->h_count, h_count, sizeof(h_count));
434  memcpy(s->v_count, v_count, sizeof(v_count));
435  s->interlaced = 0;
436  s->got_picture = 0;
437 
438  /* test interlaced mode */
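/* Heuristic: if this first picture is much shorter than the container's
 * coded height (less than 3/4 of it), assume the JPEG carries a single
 * field, so switch to interlaced handling and double the frame height. */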
439  if (s->first_picture &&
440  (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
441  s->orig_height != 0 &&
442  s->height < ((s->orig_height * 3) / 4)) {
443  s->interlaced = 1;
444  s->bottom_field = s->interlace_polarity;
445  s->picture_ptr->flags |= AV_FRAME_FLAG_INTERLACED;
446  s->picture_ptr->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !s->interlace_polarity;
447  height *= 2;
448  }
449 
450  ret = ff_set_dimensions(s->avctx, width, height);
451  if (ret < 0)
452  return ret;
453 
454  if (s->avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
455  (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
456  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
457  s->orig_height < height)
458  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
459 
460  s->first_picture = 0;
461  } else {
462  size_change = 0;
463  }
464 
465  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
466  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
467  if (s->avctx->height <= 0)
468  return AVERROR_INVALIDDATA;
469  }
470  if (s->bayer && s->progressive) {
471  avpriv_request_sample(s->avctx, "progressively coded bayer picture");
472  return AVERROR_INVALIDDATA;
473  }
474 
475  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
476  if (s->progressive) {
477  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
478  return AVERROR_INVALIDDATA;
479  }
480  } else {
481  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
482  s->rgb = 1;
483  else if (!s->lossless)
484  s->rgb = 0;
485  /* XXX: not complete test ! */
486  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
487  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
488  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
489  (s->h_count[3] << 4) | s->v_count[3];
490  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
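/* pix_fmt_id packs the per-component sampling factors, one nibble each:
 * h0 v0 h1 v1 h2 v2 h3 v3 from the most significant nibble down. For
 * example 0x22111100 means luma sampled 2x2 with both chroma planes 1x1,
 * i.e. 4:2:0. The two adjustments below fold equivalent combinations
 * together (e.g. 2x2,2x2,2x2 becomes 1x1,1x1,1x1), and the loop after them
 * flags planes coded at half resolution so they can be up-scaled later. */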
491  /* NOTE we do not allocate pictures large enough for the possible
492  * padding of h/v_count being 4 */
493  if (!(pix_fmt_id & 0xD0D0D0D0))
494  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
495  if (!(pix_fmt_id & 0x0D0D0D0D))
496  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
497 
498  for (i = 0; i < 8; i++) {
499  int j = 6 + (i&1) - (i&6);
500  int is = (pix_fmt_id >> (4*i)) & 0xF;
501  int js = (pix_fmt_id >> (4*j)) & 0xF;
502 
503  if (is == 1 && js != 2 && (i < 2 || i > 5))
504  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
505  if (is == 1 && js != 2 && (i < 2 || i > 5))
506  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
507 
508  if (is == 1 && js == 2) {
509  if (i & 1) s->upscale_h[j/2] = 1;
510  else s->upscale_v[j/2] = 1;
511  }
512  }
513 
514  if (s->bayer) {
515  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
516  goto unk_pixfmt;
517  }
518 
519  switch (pix_fmt_id) {
520  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
521  if (!s->bayer)
522  goto unk_pixfmt;
523  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
524  break;
525  case 0x11111100:
526  if (s->rgb)
527  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
528  else {
529  if ( s->adobe_transform == 0
530  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
531  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
532  } else {
533  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
534  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
535  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
536  }
537  }
538  av_assert0(s->nb_components == 3);
539  break;
540  case 0x11111111:
541  if (s->rgb)
542  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
543  else {
544  if (s->adobe_transform == 0 && s->bits <= 8) {
545  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
546  } else {
547  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
548  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
549  }
550  }
551  av_assert0(s->nb_components == 4);
552  break;
553  case 0x11412100:
554  if (s->bits > 8)
555  goto unk_pixfmt;
556  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
557  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
558  s->upscale_h[0] = 4;
559  s->upscale_h[1] = 0;
560  s->upscale_h[2] = 1;
561  } else {
562  goto unk_pixfmt;
563  }
564  break;
565  case 0x22111122:
566  case 0x22111111:
567  if (s->adobe_transform == 0 && s->bits <= 8) {
568  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
569  s->upscale_v[1] = s->upscale_v[2] = 1;
570  s->upscale_h[1] = s->upscale_h[2] = 1;
571  } else if (s->adobe_transform == 2 && s->bits <= 8) {
572  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
573  s->upscale_v[1] = s->upscale_v[2] = 1;
574  s->upscale_h[1] = s->upscale_h[2] = 1;
575  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
576  } else {
577  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
578  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
579  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
580  }
581  av_assert0(s->nb_components == 4);
582  break;
583  case 0x12121100:
584  case 0x22122100:
585  case 0x21211100:
586  case 0x21112100:
587  case 0x22211200:
588  case 0x22221100:
589  case 0x22112200:
590  case 0x11222200:
591  if (s->bits > 8)
592  goto unk_pixfmt;
593  if (s->adobe_transform == 0 || s->component_id[0] == 'R' &&
594  s->component_id[1] == 'G' && s->component_id[2] == 'B') {
595  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
596  } else {
597  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
598  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
599  }
600  break;
601  case 0x11000000:
602  case 0x13000000:
603  case 0x14000000:
604  case 0x31000000:
605  case 0x33000000:
606  case 0x34000000:
607  case 0x41000000:
608  case 0x43000000:
609  case 0x44000000:
610  if(s->bits <= 8)
611  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
612  else
613  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
614  break;
615  case 0x12111100:
616  case 0x14121200:
617  case 0x14111100:
618  case 0x22211100:
619  case 0x22112100:
620  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
621  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
622  else
623  goto unk_pixfmt;
624  s->upscale_v[1] = s->upscale_v[2] = 1;
625  } else {
626  if (pix_fmt_id == 0x14111100)
627  s->upscale_v[1] = s->upscale_v[2] = 1;
628  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
629  else
630  goto unk_pixfmt;
631  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
632  }
633  break;
634  case 0x21111100:
635  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
636  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
637  else
638  goto unk_pixfmt;
639  s->upscale_h[1] = s->upscale_h[2] = 1;
640  } else {
641  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
642  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
643  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
644  }
645  break;
646  case 0x11311100:
647  if (s->bits > 8)
648  goto unk_pixfmt;
649  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B')
650  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
651  else
652  goto unk_pixfmt;
653  s->upscale_h[0] = s->upscale_h[2] = 2;
654  break;
655  case 0x31111100:
656  if (s->bits > 8)
657  goto unk_pixfmt;
658  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
659  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
660  s->upscale_h[1] = s->upscale_h[2] = 2;
661  break;
662  case 0x22121100:
663  case 0x22111200:
664  case 0x41211100:
665  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
666  else
667  goto unk_pixfmt;
668  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
669  break;
670  case 0x22111100:
671  case 0x23111100:
672  case 0x42111100:
673  case 0x24111100:
674  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
675  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
676  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
677  if (pix_fmt_id == 0x42111100) {
678  if (s->bits > 8)
679  goto unk_pixfmt;
680  s->upscale_h[1] = s->upscale_h[2] = 1;
681  } else if (pix_fmt_id == 0x24111100) {
682  if (s->bits > 8)
683  goto unk_pixfmt;
684  s->upscale_v[1] = s->upscale_v[2] = 1;
685  } else if (pix_fmt_id == 0x23111100) {
686  if (s->bits > 8)
687  goto unk_pixfmt;
688  s->upscale_v[1] = s->upscale_v[2] = 2;
689  }
690  break;
691  case 0x41111100:
692  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
693  else
694  goto unk_pixfmt;
695  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
696  break;
697  default:
698  unk_pixfmt:
699  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
700  memset(s->upscale_h, 0, sizeof(s->upscale_h));
701  memset(s->upscale_v, 0, sizeof(s->upscale_v));
702  return AVERROR_PATCHWELCOME;
703  }
704  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
705  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
706  return AVERROR_PATCHWELCOME;
707  }
708  if (s->ls) {
709  memset(s->upscale_h, 0, sizeof(s->upscale_h));
710  memset(s->upscale_v, 0, sizeof(s->upscale_v));
711  if (s->nb_components == 3) {
712  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
713  } else if (s->nb_components != 1) {
714  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
715  return AVERROR_PATCHWELCOME;
716  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
717  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
718  else if (s->bits <= 8)
719  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
720  else
721  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
722  }
723 
724  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
725  if (!s->pix_desc) {
726  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
727  return AVERROR_BUG;
728  }
729 
730  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
731  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
732  } else {
733  enum AVPixelFormat pix_fmts[] = {
734 #if CONFIG_MJPEG_NVDEC_HWACCEL
735  AV_PIX_FMT_CUDA,
736 #endif
737 #if CONFIG_MJPEG_VAAPI_HWACCEL
738  AV_PIX_FMT_VAAPI,
739 #endif
740  s->avctx->pix_fmt,
741  AV_PIX_FMT_NONE,
742  };
743  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
744  if (s->hwaccel_pix_fmt < 0)
745  return AVERROR(EINVAL);
746 
747  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
748  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
749  }
750 
751  if (s->avctx->skip_frame == AVDISCARD_ALL) {
752  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
753  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
754  s->got_picture = 1;
755  return 0;
756  }
757 
758  av_frame_unref(s->picture_ptr);
759  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
760  return -1;
761  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
762  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
763  s->got_picture = 1;
764 
765  // Let's clear the palette to avoid leaving uninitialized values in it
766  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
767  memset(s->picture_ptr->data[1], 0, 1024);
768 
769  for (i = 0; i < 4; i++)
770  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
771 
772  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
773  s->width, s->height, s->linesize[0], s->linesize[1],
774  s->interlaced, s->avctx->height);
775 
776  }
777 
778  if ((s->rgb && !s->lossless && !s->ls) ||
779  (!s->rgb && s->ls && s->nb_components > 1) ||
780  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
781  ) {
782  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
783  return AVERROR_PATCHWELCOME;
784  }
785 
786  /* totally blank picture as progressive JPEG will only add details to it */
787  if (s->progressive) {
788  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
789  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
790  for (i = 0; i < s->nb_components; i++) {
791  int size = bw * bh * s->h_count[i] * s->v_count[i];
792  av_freep(&s->blocks[i]);
793  av_freep(&s->last_nnz[i]);
794  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
795  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
796  if (!s->blocks[i] || !s->last_nnz[i])
797  return AVERROR(ENOMEM);
798  s->block_stride[i] = bw * s->h_count[i];
799  }
800  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
801  }
802 
803  if (s->avctx->hwaccel) {
804  const FFHWAccel *hwaccel = ffhwaccel(s->avctx->hwaccel);
805  s->hwaccel_picture_private =
806  av_mallocz(hwaccel->frame_priv_data_size);
807  if (!s->hwaccel_picture_private)
808  return AVERROR(ENOMEM);
809 
810  ret = hwaccel->start_frame(s->avctx, NULL, s->raw_image_buffer,
811  s->raw_image_buffer_size);
812  if (ret < 0)
813  return ret;
814  }
815 
816  return 0;
817 }
818 
819 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
820 {
821  int code;
822  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
823  if (code < 0 || code > 16) {
824  av_log(s->avctx, AV_LOG_ERROR,
825  "mjpeg_decode_dc: bad vlc: %d\n", dc_index);
826  return AVERROR_INVALIDDATA;
827  }
828 
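/* The VLC symbol gives the DC size category; read that many extra bits and
 * map them with JPEG's EXTEND rule (get_xbits) to the signed DC difference. */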
829  *val = code ? get_xbits(&s->gb, code) : 0;
830  return 0;
831 }
832 
833 /* decode block and dequantize */
834 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
835  int dc_index, int ac_index, uint16_t *quant_matrix)
836 {
837  int code, i, j, level, val;
838 
839  /* DC coef */
840  int ret = mjpeg_decode_dc(s, dc_index, &val);
841  if (ret < 0)
842  return ret;
843 
844  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
845  s->last_dc[component] = val;
846  block[0] = av_clip_int16(val);
847  /* AC coefs */
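/* Each AC symbol packs a zero-run length in its high nibble and the
 * coefficient magnitude category in its low nibble; the magnitude bits are
 * taken straight from the bit-reader cache and turned into a signed level. */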
848  i = 0;
849  {OPEN_READER(re, &s->gb);
850  do {
851  UPDATE_CACHE(re, &s->gb);
852  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
853 
854  i += ((unsigned)code) >> 4;
855  code &= 0xf;
856  if (code) {
857  // GET_VLC updates the cache if parsing reaches the second stage.
858  // So we have at least MIN_CACHE_BITS - 9 > 15 bits left here
859  // and don't need to refill the cache.
860  {
861  int cache = GET_CACHE(re, &s->gb);
862  int sign = (~cache) >> 31;
863  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
864  }
865 
866  LAST_SKIP_BITS(re, &s->gb, code);
867 
868  if (i > 63) {
869  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
870  return AVERROR_INVALIDDATA;
871  }
872  j = s->permutated_scantable[i];
873  block[j] = level * quant_matrix[i];
874  }
875  } while (i < 63);
876  CLOSE_READER(re, &s->gb);}
877 
878  return 0;
879 }
880 
881 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
882  int component, int dc_index,
883  uint16_t *quant_matrix, int Al)
884 {
885  unsigned val;
886  s->bdsp.clear_block(block);
887  int ret = mjpeg_decode_dc(s, dc_index, &val);
888  if (ret < 0)
889  return ret;
890 
891  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
892  s->last_dc[component] = val;
893  block[0] = val;
894  return 0;
895 }
896 
897 /* decode block and dequantize - progressive JPEG version */
898 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
899  uint8_t *last_nnz, int ac_index,
900  uint16_t *quant_matrix,
901  int ss, int se, int Al, int *EOBRUN)
902 {
903  int code, i, j, val, run;
904  unsigned level;
905 
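/* A pending end-of-band run means this block has no further non-zero
 * coefficients in the ss..se band, so just consume one entry of the run. */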
906  if (*EOBRUN) {
907  (*EOBRUN)--;
908  return 0;
909  }
910 
911  {
912  OPEN_READER(re, &s->gb);
913  for (i = ss; ; i++) {
914  UPDATE_CACHE(re, &s->gb);
915  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
916 
917  run = ((unsigned) code) >> 4;
918  code &= 0xF;
919  if (code) {
920  i += run;
921 
922  {
923  int cache = GET_CACHE(re, &s->gb);
924  int sign = (~cache) >> 31;
925  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
926  }
927 
928  LAST_SKIP_BITS(re, &s->gb, code);
929 
930  if (i >= se) {
931  if (i == se) {
932  j = s->permutated_scantable[se];
933  block[j] = level * (quant_matrix[se] << Al);
934  break;
935  }
936  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
937  return AVERROR_INVALIDDATA;
938  }
939  j = s->permutated_scantable[i];
940  block[j] = level * (quant_matrix[i] << Al);
941  } else {
942  if (run == 0xF) {// ZRL - skip 15 coefficients
943  i += 15;
944  if (i >= se) {
945  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
946  return AVERROR_INVALIDDATA;
947  }
948  } else {
949  val = (1 << run);
950  if (run) {
951  // Given that GET_VLC reloads internally, we always
952  // have at least 16 bits in the cache here.
953  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
954  LAST_SKIP_BITS(re, &s->gb, run);
955  }
956  *EOBRUN = val - 1;
957  break;
958  }
959  }
960  }
961  CLOSE_READER(re, &s->gb);
962  }
963 
964  if (i > *last_nnz)
965  *last_nnz = i;
966 
967  return 0;
968 }
969 
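/* Successive-approximation refinement helpers: REFINE_BIT reads one
 * correction bit and, when it is set, moves an already non-zero coefficient
 * one quantization step (<< Al) further from zero; ZERO_RUN skips over
 * 'run' zero coefficients, refining any non-zero ones passed on the way. */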
970 #define REFINE_BIT(j) { \
971  UPDATE_CACHE(re, &s->gb); \
972  sign = block[j] >> 15; \
973  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
974  ((quant_matrix[i] ^ sign) - sign) << Al; \
975  LAST_SKIP_BITS(re, &s->gb, 1); \
976 }
977 
978 #define ZERO_RUN \
979 for (; ; i++) { \
980  if (i > last) { \
981  i += run; \
982  if (i > se) { \
983  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
984  return -1; \
985  } \
986  break; \
987  } \
988  j = s->permutated_scantable[i]; \
989  if (block[j]) \
990  REFINE_BIT(j) \
991  else if (run-- == 0) \
992  break; \
993 }
994 
995 /* decode block and dequantize - progressive JPEG refinement pass */
996 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
997  uint8_t *last_nnz,
998  int ac_index, uint16_t *quant_matrix,
999  int ss, int se, int Al, int *EOBRUN)
1000 {
1001  int code, i = ss, j, sign, val, run;
1002  int last = FFMIN(se, *last_nnz);
1003 
1004  OPEN_READER(re, &s->gb);
1005  if (*EOBRUN) {
1006  (*EOBRUN)--;
1007  } else {
1008  for (; ; i++) {
1009  UPDATE_CACHE(re, &s->gb);
1010  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
1011 
1012  if (code & 0xF) {
1013  run = ((unsigned) code) >> 4;
1014  val = SHOW_UBITS(re, &s->gb, 1);
1015  LAST_SKIP_BITS(re, &s->gb, 1);
1016  ZERO_RUN;
1017  j = s->permutated_scantable[i];
1018  val--;
1019  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
1020  if (i == se) {
1021  if (i > *last_nnz)
1022  *last_nnz = i;
1023  CLOSE_READER(re, &s->gb);
1024  return 0;
1025  }
1026  } else {
1027  run = ((unsigned) code) >> 4;
1028  if (run == 0xF) {
1029  ZERO_RUN;
1030  } else {
1031  val = run;
1032  run = (1 << run);
1033  if (val) {
1034  // Given that GET_VLC reloads internally, we always
1035  // have at least 16 bits in the cache here.
1036  run += SHOW_UBITS(re, &s->gb, val);
1037  LAST_SKIP_BITS(re, &s->gb, val);
1038  }
1039  *EOBRUN = run - 1;
1040  break;
1041  }
1042  }
1043  }
1044 
1045  if (i > *last_nnz)
1046  *last_nnz = i;
1047  }
1048 
1049  for (; i <= last; i++) {
1050  j = s->permutated_scantable[i];
1051  if (block[j])
1052  REFINE_BIT(j)
1053  }
1054  CLOSE_READER(re, &s->gb);
1055 
1056  return 0;
1057 }
1058 #undef REFINE_BIT
1059 #undef ZERO_RUN
1060 
1061 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1062 {
1063  int i;
1064  int reset = 0;
1065 
1066  if (s->restart_interval) {
1067  s->restart_count--;
1068  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1069  align_get_bits(&s->gb);
1070  for (i = 0; i < nb_components; i++) /* reset dc */
1071  s->last_dc[i] = (4 << s->bits);
1072  }
1073 
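/* Look ahead across the bits that pad to the next byte boundary: if they
 * read as all-one fill bits or as an 0xFF byte, attempt to resynchronize on
 * a byte-aligned RSTn marker (0xD0-0xD7) and reset the DC predictors. */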
1074  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1075  /* skip RSTn */
1076  if (s->restart_count == 0) {
1077  if( show_bits(&s->gb, i) == (1 << i) - 1
1078  || show_bits(&s->gb, i) == 0xFF) {
1079  int pos = get_bits_count(&s->gb);
1080  align_get_bits(&s->gb);
1081  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1082  skip_bits(&s->gb, 8);
1083  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1084  for (i = 0; i < nb_components; i++) /* reset dc */
1085  s->last_dc[i] = (4 << s->bits);
1086  reset = 1;
1087  } else
1088  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1089  }
1090  }
1091  }
1092  return reset;
1093 }
1094 
1095 /* Handles 1 to 4 components */
1096 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1097 {
1098  int i, mb_x, mb_y;
1099  unsigned width;
1100  uint16_t (*buffer)[4];
1101  int left[4], top[4], topleft[4];
1102  const int linesize = s->linesize[0];
1103  const int mask = ((1 << s->bits) - 1) << point_transform;
1104  int resync_mb_y = 0;
1105  int resync_mb_x = 0;
1106  int vpred[6];
1107  int ret;
1108 
1109  if (!s->bayer && s->nb_components < 3)
1110  return AVERROR_INVALIDDATA;
1111  if (s->bayer && s->nb_components > 2)
1112  return AVERROR_INVALIDDATA;
1113  if (s->nb_components <= 0 || s->nb_components > 4)
1114  return AVERROR_INVALIDDATA;
1115  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1116  return AVERROR_INVALIDDATA;
1117  if (s->bayer) {
1118  if (s->rct || s->pegasus_rct)
1119  return AVERROR_INVALIDDATA;
1120  }
1121 
1122 
1123  s->restart_count = s->restart_interval;
1124 
1125  if (s->restart_interval == 0)
1126  s->restart_interval = INT_MAX;
1127 
1128  if (s->bayer)
1129  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1130  else
1131  width = s->mb_width;
1132 
1133  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1134  if (!s->ljpeg_buffer)
1135  return AVERROR(ENOMEM);
1136 
1137  buffer = s->ljpeg_buffer;
1138 
1139  for (i = 0; i < 4; i++)
1140  buffer[0][i] = 1 << (s->bits - 1);
1141 
1142  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1143  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1144 
1145  if (s->interlaced && s->bottom_field)
1146  ptr += linesize >> 1;
1147 
1148  for (i = 0; i < 4; i++)
1149  top[i] = left[i] = topleft[i] = buffer[0][i];
1150 
1151  if ((mb_y * s->width) % s->restart_interval == 0) {
1152  for (i = 0; i < 6; i++)
1153  vpred[i] = 1 << (s->bits-1);
1154  }
1155 
1156  for (mb_x = 0; mb_x < width; mb_x++) {
1157  int modified_predictor = predictor;
1158 
1159  if (get_bits_left(&s->gb) < 1) {
1160  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1161  return AVERROR_INVALIDDATA;
1162  }
1163 
1164  if (s->restart_interval && !s->restart_count){
1165  s->restart_count = s->restart_interval;
1166  resync_mb_x = mb_x;
1167  resync_mb_y = mb_y;
1168  for(i=0; i<4; i++)
1169  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1170  }
1171  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1172  modified_predictor = 1;
1173 
1174  for (i=0;i<nb_components;i++) {
1175  int pred, dc;
1176 
1177  topleft[i] = top[i];
1178  top[i] = buffer[mb_x][i];
1179 
1180  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1181  if (ret < 0)
1182  return ret;
1183 
1184  if (!s->bayer || mb_x) {
1185  pred = left[i];
1186  } else { /* This path runs only for the first line in bayer images */
1187  vpred[i] += dc;
1188  pred = vpred[i] - dc;
1189  }
1190 
1191  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1192 
1193  left[i] = buffer[mb_x][i] =
1194  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1195  }
1196 
1197  if (s->restart_interval && !--s->restart_count) {
1198  align_get_bits(&s->gb);
1199  skip_bits(&s->gb, 16); /* skip RSTn */
1200  }
1201  }
1202  if (s->rct && s->nb_components == 4) {
1203  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1204  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1205  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1206  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1207  ptr[4*mb_x + 0] = buffer[mb_x][3];
1208  }
1209  } else if (s->nb_components == 4) {
1210  for(i=0; i<nb_components; i++) {
1211  int c= s->comp_index[i];
1212  if (s->bits <= 8) {
1213  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1214  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1215  }
1216  } else if(s->bits == 9) {
1217  return AVERROR_PATCHWELCOME;
1218  } else {
1219  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1220  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1221  }
1222  }
1223  }
1224  } else if (s->rct) {
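/* Inverse reversible colour transform: rebuild the green-like channel as
 * Y - ((Cb + Cr - 0x200) >> 2), then add it back to the two chroma
 * differences; the 0x200 term removes the bias of the 9-bit chroma values. */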
1225  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1226  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1227  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1228  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1229  }
1230  } else if (s->pegasus_rct) {
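/* Pegasus variant of the same inverse transform, without the chroma bias. */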
1231  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1232  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1233  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1234  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1235  }
1236  } else if (s->bayer) {
1237  if (s->bits <= 8)
1238  return AVERROR_PATCHWELCOME;
1239  if (nb_components == 1) {
1240  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1241  for (mb_x = 0; mb_x < width; mb_x++)
1242  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1243  } else if (nb_components == 2) {
1244  for (mb_x = 0; mb_x < width; mb_x++) {
1245  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1246  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1247  }
1248  }
1249  } else {
1250  for(i=0; i<nb_components; i++) {
1251  int c= s->comp_index[i];
1252  if (s->bits <= 8) {
1253  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1254  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1255  }
1256  } else if(s->bits == 9) {
1257  return AVERROR_PATCHWELCOME;
1258  } else {
1259  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1260  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1261  }
1262  }
1263  }
1264  }
1265  }
1266  return 0;
1267 }
1268 
1269 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1270  int point_transform, int nb_components)
1271 {
1272  int i, mb_x, mb_y, mask;
1273  int bits= (s->bits+7)&~7;
1274  int resync_mb_y = 0;
1275  int resync_mb_x = 0;
1276  int ret;
1277 
1278  point_transform += bits - s->bits;
1279  mask = ((1 << s->bits) - 1) << point_transform;
1280 
1281  av_assert0(nb_components>=1 && nb_components<=4);
1282 
1283  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1284  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1285  if (get_bits_left(&s->gb) < 1) {
1286  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1287  return AVERROR_INVALIDDATA;
1288  }
1289  if (s->restart_interval && !s->restart_count){
1290  s->restart_count = s->restart_interval;
1291  resync_mb_x = mb_x;
1292  resync_mb_y = mb_y;
1293  }
1294 
1295  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1296  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1297  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1298  for (i = 0; i < nb_components; i++) {
1299  uint8_t *ptr;
1300  uint16_t *ptr16;
1301  int n, h, v, x, y, c, j, linesize;
1302  n = s->nb_blocks[i];
1303  c = s->comp_index[i];
1304  h = s->h_scount[i];
1305  v = s->v_scount[i];
1306  x = 0;
1307  y = 0;
1308  linesize= s->linesize[c];
1309 
1310  if(bits>8) linesize /= 2;
1311 
1312  for(j=0; j<n; j++) {
1313  int pred, dc;
1314 
1315  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1316  if (ret < 0)
1317  return ret;
1318 
1319  if ( h * mb_x + x >= s->width
1320  || v * mb_y + y >= s->height) {
1321  // Nothing to do
1322  } else if (bits<=8) {
1323  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1324  if(y==0 && toprow){
1325  if(x==0 && leftcol){
1326  pred= 1 << (bits - 1);
1327  }else{
1328  pred= ptr[-1];
1329  }
1330  }else{
1331  if(x==0 && leftcol){
1332  pred= ptr[-linesize];
1333  }else{
1334  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1335  }
1336  }
1337 
1338  if (s->interlaced && s->bottom_field)
1339  ptr += linesize >> 1;
1340  pred &= mask;
1341  *ptr= pred + ((unsigned)dc << point_transform);
1342  }else{
1343  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1344  if(y==0 && toprow){
1345  if(x==0 && leftcol){
1346  pred= 1 << (bits - 1);
1347  }else{
1348  pred= ptr16[-1];
1349  }
1350  }else{
1351  if(x==0 && leftcol){
1352  pred= ptr16[-linesize];
1353  }else{
1354  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1355  }
1356  }
1357 
1358  if (s->interlaced && s->bottom_field)
1359  ptr16 += linesize >> 1;
1360  pred &= mask;
1361  *ptr16= pred + ((unsigned)dc << point_transform);
1362  }
1363  if (++x == h) {
1364  x = 0;
1365  y++;
1366  }
1367  }
1368  }
1369  } else {
1370  for (i = 0; i < nb_components; i++) {
1371  uint8_t *ptr;
1372  uint16_t *ptr16;
1373  int n, h, v, x, y, c, j, linesize, dc;
1374  n = s->nb_blocks[i];
1375  c = s->comp_index[i];
1376  h = s->h_scount[i];
1377  v = s->v_scount[i];
1378  x = 0;
1379  y = 0;
1380  linesize = s->linesize[c];
1381 
1382  if(bits>8) linesize /= 2;
1383 
1384  for (j = 0; j < n; j++) {
1385  int pred;
1386 
1387  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1388  if (ret < 0)
1389  return ret;
1390 
1391  if ( h * mb_x + x >= s->width
1392  || v * mb_y + y >= s->height) {
1393  // Nothing to do
1394  } else if (bits<=8) {
1395  ptr = s->picture_ptr->data[c] +
1396  (linesize * (v * mb_y + y)) +
1397  (h * mb_x + x); //FIXME optimize this crap
1398  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1399 
1400  pred &= mask;
1401  *ptr = pred + ((unsigned)dc << point_transform);
1402  }else{
1403  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1404  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1405 
1406  pred &= mask;
1407  *ptr16= pred + ((unsigned)dc << point_transform);
1408  }
1409 
1410  if (++x == h) {
1411  x = 0;
1412  y++;
1413  }
1414  }
1415  }
1416  }
1417  if (s->restart_interval && !--s->restart_count) {
1418  align_get_bits(&s->gb);
1419  skip_bits(&s->gb, 16); /* skip RSTn */
1420  }
1421  }
1422  }
1423  return 0;
1424 }
1425 
1426 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1427  uint8_t *dst, const uint8_t *src,
1428  int linesize, int lowres)
1429 {
1430  switch (lowres) {
1431  case 0: s->copy_block(dst, src, linesize, 8);
1432  break;
1433  case 1: copy_block4(dst, src, linesize, linesize, 4);
1434  break;
1435  case 2: copy_block2(dst, src, linesize, linesize, 2);
1436  break;
1437  case 3: *dst = *src;
1438  break;
1439  }
1440 }
1441 
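/* For bit depths that are not a multiple of 8, left-shift the IDCT output so
 * the samples fill the 8- or 16-bit range expected by the pixel format. */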
1442 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1443 {
1444  int block_x, block_y;
1445  int size = 8 >> s->avctx->lowres;
1446  if (s->bits > 8) {
1447  for (block_y=0; block_y<size; block_y++)
1448  for (block_x=0; block_x<size; block_x++)
1449  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1450  } else {
1451  for (block_y=0; block_y<size; block_y++)
1452  for (block_x=0; block_x<size; block_x++)
1453  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1454  }
1455 }
1456 
1457 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1458  int Al, const uint8_t *mb_bitmask,
1459  int mb_bitmask_size,
1460  const AVFrame *reference)
1461 {
1462  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1463  uint8_t *data[MAX_COMPONENTS];
1464  const uint8_t *reference_data[MAX_COMPONENTS];
1465  int linesize[MAX_COMPONENTS];
1466  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1467  int bytes_per_pixel = 1 + (s->bits > 8);
1468 
1469  if (mb_bitmask) {
1470  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1471  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1472  return AVERROR_INVALIDDATA;
1473  }
1474  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1475  }
1476 
1477  s->restart_count = 0;
1478 
1479  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1480  &chroma_v_shift);
1481  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1482  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1483 
1484  for (i = 0; i < nb_components; i++) {
1485  int c = s->comp_index[i];
1486  data[c] = s->picture_ptr->data[c];
1487  reference_data[c] = reference ? reference->data[c] : NULL;
1488  linesize[c] = s->linesize[c];
1489  s->coefs_finished[c] |= 1;
1490  }
1491 
1492  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1493  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1494  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1495 
1496  if (s->restart_interval && !s->restart_count)
1497  s->restart_count = s->restart_interval;
1498 
1499  if (get_bits_left(&s->gb) < 0) {
1500  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1501  -get_bits_left(&s->gb));
1502  return AVERROR_INVALIDDATA;
1503  }
1504  for (i = 0; i < nb_components; i++) {
1505  uint8_t *ptr;
1506  int n, h, v, x, y, c, j;
1507  int block_offset;
1508  n = s->nb_blocks[i];
1509  c = s->comp_index[i];
1510  h = s->h_scount[i];
1511  v = s->v_scount[i];
1512  x = 0;
1513  y = 0;
1514  for (j = 0; j < n; j++) {
1515  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1516  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1517 
1518  if (s->interlaced && s->bottom_field)
1519  block_offset += linesize[c] >> 1;
1520  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1521  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1522  ptr = data[c] + block_offset;
1523  } else
1524  ptr = NULL;
1525  if (!s->progressive) {
1526  if (copy_mb) {
1527  if (ptr)
1528  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1529  linesize[c], s->avctx->lowres);
1530 
1531  } else {
1532  s->bdsp.clear_block(s->block);
1533  if (decode_block(s, s->block, i,
1534  s->dc_index[i], s->ac_index[i],
1535  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1536  av_log(s->avctx, AV_LOG_ERROR,
1537  "error y=%d x=%d\n", mb_y, mb_x);
1538  return AVERROR_INVALIDDATA;
1539  }
1540  if (ptr && linesize[c]) {
1541  s->idsp.idct_put(ptr, linesize[c], s->block);
1542  if (s->bits & 7)
1543  shift_output(s, ptr, linesize[c]);
1544  }
1545  }
1546  } else {
1547  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1548  (h * mb_x + x);
1549  int16_t *block = s->blocks[c][block_idx];
1550  if (Ah)
1551  block[0] += get_bits1(&s->gb) *
1552  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1553  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1554  s->quant_matrixes[s->quant_sindex[i]],
1555  Al) < 0) {
1556  av_log(s->avctx, AV_LOG_ERROR,
1557  "error y=%d x=%d\n", mb_y, mb_x);
1558  return AVERROR_INVALIDDATA;
1559  }
1560  }
1561  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1562  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1563  mb_x, mb_y, x, y, c, s->bottom_field,
1564  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1565  if (++x == h) {
1566  x = 0;
1567  y++;
1568  }
1569  }
1570  }
1571 
1572  handle_rstn(s, nb_components);
1573  }
1574  }
1575  return 0;
1576 }
1577 
1578 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1579  int se, int Ah, int Al)
1580 {
1581  int mb_x, mb_y;
1582  int EOBRUN = 0;
1583  int c = s->comp_index[0];
1584  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1585 
1586  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1587  if (se < ss || se > 63) {
1588  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1589  return AVERROR_INVALIDDATA;
1590  }
1591 
1592  // s->coefs_finished is a bitmask for coefficients coded
1593  // ss and se are parameters telling start and end coefficients
1594  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1595 
1596  s->restart_count = 0;
1597 
1598  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1599  int block_idx = mb_y * s->block_stride[c];
1600  int16_t (*block)[64] = &s->blocks[c][block_idx];
1601  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1602  if (get_bits_left(&s->gb) <= 0) {
1603  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1604  return AVERROR_INVALIDDATA;
1605  }
1606  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1607  int ret;
1608  if (s->restart_interval && !s->restart_count)
1609  s->restart_count = s->restart_interval;
1610 
1611  if (Ah)
1612  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1613  quant_matrix, ss, se, Al, &EOBRUN);
1614  else
1615  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1616  quant_matrix, ss, se, Al, &EOBRUN);
1617 
1618  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1619  ret = AVERROR_INVALIDDATA;
1620  if (ret < 0) {
1621  av_log(s->avctx, AV_LOG_ERROR,
1622  "error y=%d x=%d\n", mb_y, mb_x);
1623  return AVERROR_INVALIDDATA;
1624  }
1625 
1626  if (handle_rstn(s, 0))
1627  EOBRUN = 0;
1628  }
1629  }
1630  return 0;
1631 }
1632 
1633 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1634 {
1635  int mb_x, mb_y;
1636  int c;
1637  const int bytes_per_pixel = 1 + (s->bits > 8);
1638  const int block_size = s->lossless ? 1 : 8;
1639 
1640  for (c = 0; c < s->nb_components; c++) {
1641  uint8_t *data = s->picture_ptr->data[c];
1642  int linesize = s->linesize[c];
1643  int h = s->h_max / s->h_count[c];
1644  int v = s->v_max / s->v_count[c];
1645  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1646  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1647 
1648  if (~s->coefs_finished[c])
1649  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1650 
1651  if (s->interlaced && s->bottom_field)
1652  data += linesize >> 1;
1653 
1654  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1655  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1656  int block_idx = mb_y * s->block_stride[c];
1657  int16_t (*block)[64] = &s->blocks[c][block_idx];
1658  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1659  s->idsp.idct_put(ptr, linesize, *block);
1660  if (s->bits & 7)
1661  shift_output(s, ptr, linesize);
1662  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1663  }
1664  }
1665  }
1666 }
1667 
1668 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1669  int mb_bitmask_size, const AVFrame *reference)
1670 {
1671  int len, nb_components, i, h, v, predictor, point_transform;
1672  int index, id, ret;
1673  const int block_size = s->lossless ? 1 : 8;
1674  int ilv, prev_shift;
1675 
1676  if (!s->got_picture) {
1677  av_log(s->avctx, AV_LOG_WARNING,
1678  "Can not process SOS before SOF, skipping\n");
1679  return -1;
1680  }
1681 
1682  /* XXX: verify len field validity */
1683  len = get_bits(&s->gb, 16);
1684  nb_components = get_bits(&s->gb, 8);
1685  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1686  avpriv_report_missing_feature(s->avctx,
1687  "decode_sos: nb_components (%d)",
1688  nb_components);
1689  return AVERROR_PATCHWELCOME;
1690  }
1691  if (len != 6 + 2 * nb_components) {
1692  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1693  return AVERROR_INVALIDDATA;
1694  }
1695  for (i = 0; i < nb_components; i++) {
1696  id = get_bits(&s->gb, 8);
1697  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1698  /* find component index */
1699  for (index = 0; index < s->nb_components; index++)
1700  if (id == s->component_id[index])
1701  break;
1702  if (index == s->nb_components) {
1703  av_log(s->avctx, AV_LOG_ERROR,
1704  "decode_sos: index(%d) out of components\n", index);
1705  return AVERROR_INVALIDDATA;
1706  }
1707  /* Metasoft MJPEG codec has Cb and Cr swapped */
1708  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1709  && nb_components == 3 && s->nb_components == 3 && i)
1710  index = 3 - i;
1711 
1712  s->quant_sindex[i] = s->quant_index[index];
1713  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1714  s->h_scount[i] = s->h_count[index];
1715  s->v_scount[i] = s->v_count[index];
1716 
1717  s->comp_index[i] = index;
1718 
1719  s->dc_index[i] = get_bits(&s->gb, 4);
1720  s->ac_index[i] = get_bits(&s->gb, 4);
1721 
1722  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1723  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1724  goto out_of_range;
1725  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1726  goto out_of_range;
1727  }
1728 
1729  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1730  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1731  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1732  prev_shift = get_bits(&s->gb, 4); /* Ah */
1733  point_transform = get_bits(&s->gb, 4); /* Al */
1734  }else
1735  prev_shift = point_transform = 0;
1736 
1737  if (nb_components > 1) {
1738  /* interleaved stream */
1739  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1740  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1741  } else if (!s->ls) { /* skip this for JPEG-LS */
1742  h = s->h_max / s->h_scount[0];
1743  v = s->v_max / s->v_scount[0];
1744  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1745  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1746  s->nb_blocks[0] = 1;
1747  s->h_scount[0] = 1;
1748  s->v_scount[0] = 1;
1749  }
1750 
1751  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1752  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1753  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1754  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1755  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1756 
1757 
1758  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1759  for (i = s->mjpb_skiptosod; i > 0; i--)
1760  skip_bits(&s->gb, 8);
1761 
1762 next_field:
1763  for (i = 0; i < nb_components; i++)
1764  s->last_dc[i] = (4 << s->bits);
1765 
1766  if (s->avctx->hwaccel) {
1767  int bytes_to_start = get_bits_count(&s->gb) / 8;
1768  av_assert0(bytes_to_start >= 0 &&
1769  s->raw_scan_buffer_size >= bytes_to_start);
1770 
1771  ret = FF_HW_CALL(s->avctx, decode_slice,
1772  s->raw_scan_buffer + bytes_to_start,
1773  s->raw_scan_buffer_size - bytes_to_start);
1774  if (ret < 0)
1775  return ret;
1776 
1777  } else if (s->lossless) {
1778  av_assert0(s->picture_ptr == s->picture);
1779  if (CONFIG_JPEGLS_DECODER && s->ls) {
1780 // for () {
1781 // reset_ls_coding_parameters(s, 0);
1782 
1783  if ((ret = ff_jpegls_decode_picture(s, predictor,
1784  point_transform, ilv)) < 0)
1785  return ret;
1786  } else {
1787  if (s->rgb || s->bayer) {
1788  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1789  return ret;
1790  } else {
1791  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1792  point_transform,
1793  nb_components)) < 0)
1794  return ret;
1795  }
1796  }
1797  } else {
1798  if (s->progressive && predictor) {
1799  av_assert0(s->picture_ptr == s->picture);
1800  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1801  ilv, prev_shift,
1802  point_transform)) < 0)
1803  return ret;
1804  } else {
1805  if ((ret = mjpeg_decode_scan(s, nb_components,
1806  prev_shift, point_transform,
1807  mb_bitmask, mb_bitmask_size, reference)) < 0)
1808  return ret;
1809  }
1810  }
1811 
1812  if (s->interlaced &&
1813  get_bits_left(&s->gb) > 32 &&
1814  show_bits(&s->gb, 8) == 0xFF) {
1815  GetBitContext bak = s->gb;
1816  align_get_bits(&bak);
1817  if (show_bits(&bak, 16) == 0xFFD1) {
1818  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1819  s->gb = bak;
1820  skip_bits(&s->gb, 16);
1821  s->bottom_field ^= 1;
1822 
1823  goto next_field;
1824  }
1825  }
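 /* AVRn streams carry both fields of an interlaced picture in one packet,
  * separated by a byte-aligned 0xFFD1 (RST1) marker; when that marker is
  * found the decoder toggles bottom_field and loops back to next_field to
  * decode the second field into the same frame. */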
1826 
1827  emms_c();
1828  return 0;
1829  out_of_range:
1830  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1831  return AVERROR_INVALIDDATA;
1832 }
1833 
1834 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1835 {
1836  if (get_bits(&s->gb, 16) != 4)
1837  return AVERROR_INVALIDDATA;
1838  s->restart_interval = get_bits(&s->gb, 16);
1839  s->restart_count = 0;
1840  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1841  s->restart_interval);
1842 
1843  return 0;
1844 }
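 /* DRI: the restart interval is the number of MCUs between restart markers
  * (RST0..RST7); a value of 0 disables restart markers. */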
1845 
1846 static int mjpeg_decode_app(MJpegDecodeContext *s)
1847 {
1848  int len, id, i;
1849 
1850  len = get_bits(&s->gb, 16);
1851  if (len < 2)
1852  return AVERROR_INVALIDDATA;
1853  len -= 2;
1854 
1855  if (len < 4) {
1856  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1857  return AVERROR_INVALIDDATA;
1858  av_log(s->avctx, AV_LOG_VERBOSE, "skipping APPx stub (len=%" PRId32 ")\n", len);
1859  goto out;
1860  }
1861 
1862  if (8 * len > get_bits_left(&s->gb))
1863  return AVERROR_INVALIDDATA;
1864 
1865  id = get_bits_long(&s->gb, 32);
1866  len -= 4;
1867 
1868  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1869  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1870  av_fourcc2str(av_bswap32(id)), id, len);
1871 
1872  /* Buggy AVID, it puts EOI only at every 10th frame. */
1873  /* Also, this fourcc is used by non-avid files too, it holds some
1874  information, but it's always present in AVID-created files. */
1875  if (id == AV_RB32("AVI1")) {
1876  /* structure:
1877  4bytes AVI1
1878  1bytes polarity
1879  1bytes always zero
1880  4bytes field_size
1881  4bytes field_size_less_padding
1882  */
1883  s->buggy_avid = 1;
1884  i = get_bits(&s->gb, 8); len--;
1885  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1886  goto out;
1887  }
1888 
1889  if (id == AV_RB32("JFIF")) {
1890  int t_w, t_h, v1, v2;
1891  if (len < 8)
1892  goto out;
1893  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1894  v1 = get_bits(&s->gb, 8);
1895  v2 = get_bits(&s->gb, 8);
1896  skip_bits(&s->gb, 8);
1897 
1898  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1899  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1900  if ( s->avctx->sample_aspect_ratio.num <= 0
1901  || s->avctx->sample_aspect_ratio.den <= 0) {
1902  s->avctx->sample_aspect_ratio.num = 0;
1903  s->avctx->sample_aspect_ratio.den = 1;
1904  }
1905 
1906  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1907  av_log(s->avctx, AV_LOG_INFO,
1908  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1909  v1, v2,
1910  s->avctx->sample_aspect_ratio.num,
1911  s->avctx->sample_aspect_ratio.den);
1912 
1913  len -= 8;
1914  if (len >= 2) {
1915  t_w = get_bits(&s->gb, 8);
1916  t_h = get_bits(&s->gb, 8);
1917  if (t_w && t_h) {
1918  /* skip thumbnail */
1919  if (len -10 - (t_w * t_h * 3) > 0)
1920  len -= t_w * t_h * 3;
1921  }
1922  len -= 2;
1923  }
1924  goto out;
1925  }
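 /* JFIF APP0: the density-unit byte is skipped and the X/Y pixel density
  * ratio is taken as the sample aspect ratio; an invalid ratio falls back
  * to 0/1 (unknown). An embedded thumbnail, if any, is skipped by
  * adjusting the remaining segment length. */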
1926 
1927  if ( id == AV_RB32("Adob")
1928  && len >= 8
1929  && show_bits(&s->gb, 8) == 'e'
1930  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1931  skip_bits(&s->gb, 8); /* 'e' */
1932  skip_bits(&s->gb, 16); /* version */
1933  skip_bits(&s->gb, 16); /* flags0 */
1934  skip_bits(&s->gb, 16); /* flags1 */
1935  s->adobe_transform = get_bits(&s->gb, 8);
1936  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1937  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1938  len -= 8;
1939  goto out;
1940  }
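 /* Adobe APP14: only the transform byte is kept. Per Adobe's convention it
  * indicates how the components were encoded (0 = no transform, 1 = YCbCr,
  * 2 = YCCK) and is checked again after decoding when undoing the
  * transform for 4-component pictures. */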
1941 
1942  if (id == AV_RB32("LJIF")) {
1943  int rgb = s->rgb;
1944  int pegasus_rct = s->pegasus_rct;
1945  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1946  av_log(s->avctx, AV_LOG_INFO,
1947  "Pegasus lossless jpeg header found\n");
1948  skip_bits(&s->gb, 16); /* version ? */
1949  skip_bits(&s->gb, 16); /* unknown always 0? */
1950  skip_bits(&s->gb, 16); /* unknown always 0? */
1951  skip_bits(&s->gb, 16); /* unknown always 0? */
1952  switch (i=get_bits(&s->gb, 8)) {
1953  case 1:
1954  rgb = 1;
1955  pegasus_rct = 0;
1956  break;
1957  case 2:
1958  rgb = 1;
1959  pegasus_rct = 1;
1960  break;
1961  default:
1962  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1963  }
1964 
1965  len -= 9;
1966  if (s->bayer)
1967  goto out;
1968  if (s->got_picture)
1969  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1970  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1971  goto out;
1972  }
1973 
1974  s->rgb = rgb;
1975  s->pegasus_rct = pegasus_rct;
1976 
1977  goto out;
1978  }
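 /* Pegasus LJIF: colorspace byte 1 means plain RGB, 2 means RGB coded with
  * Pegasus' reversible colour transform (PRCT); the new flags are ignored
  * if they contradict a picture header that has already been decoded. */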
1979  if (id == AV_RL32("colr") && len > 0) {
1980  s->colr = get_bits(&s->gb, 8);
1981  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1982  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1983  len --;
1984  goto out;
1985  }
1986  if (id == AV_RL32("xfrm") && len > 0) {
1987  s->xfrm = get_bits(&s->gb, 8);
1988  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1989  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1990  len --;
1991  goto out;
1992  }
1993 
1994  /* JPS extension by VRex */
1995  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1996  int flags, layout, type;
1997  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1998  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1999 
2000  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
2001  skip_bits(&s->gb, 16); len -= 2; /* block length */
2002  skip_bits(&s->gb, 8); /* reserved */
2003  flags = get_bits(&s->gb, 8);
2004  layout = get_bits(&s->gb, 8);
2005  type = get_bits(&s->gb, 8);
2006  len -= 4;
2007 
2008  av_freep(&s->stereo3d);
2009  s->stereo3d = av_stereo3d_alloc();
2010  if (!s->stereo3d) {
2011  goto out;
2012  }
2013  if (type == 0) {
2014  s->stereo3d->type = AV_STEREO3D_2D;
2015  } else if (type == 1) {
2016  switch (layout) {
2017  case 0x01:
2018  s->stereo3d->type = AV_STEREO3D_LINES;
2019  break;
2020  case 0x02:
2021  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2022  break;
2023  case 0x03:
2024  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2025  break;
2026  }
2027  if (!(flags & 0x04)) {
2028  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2029  }
2030  }
2031  goto out;
2032  }
2033 
2034  /* EXIF metadata */
2035  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2036  int ret;
2037  const uint8_t *aligned;
2038 
2039  skip_bits(&s->gb, 16); // skip padding
2040  len -= 2;
2041 
2042  // init byte wise reading
2043  aligned = align_get_bits(&s->gb);
2044 
2045  ret = av_exif_parse_buffer(s->avctx, aligned, len, &s->exif_metadata, AV_EXIF_TIFF_HEADER);
2046  if (ret < 0) {
2047  av_log(s->avctx, AV_LOG_WARNING, "unable to parse EXIF buffer\n");
2048  goto out;
2049  }
2050 
2051  skip_bits(&s->gb, ret << 3);
2052  len -= ret;
2053 
2054  goto out;
2055  }
2056 
2057  /* Apple MJPEG-A */
2058  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2059  id = get_bits_long(&s->gb, 32);
2060  len -= 4;
2061  /* Apple MJPEG-A */
2062  if (id == AV_RB32("mjpg")) {
2063  /* structure:
2064  4bytes field size
2065  4bytes pad field size
2066  4bytes next off
2067  4bytes quant off
2068  4bytes huff off
2069  4bytes image off
2070  4bytes scan off
2071  4bytes data off
2072  */
2073  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2074  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2075  }
2076  }
2077 
2078  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2079  int id2;
2080  unsigned seqno;
2081  unsigned nummarkers;
2082 
2083  id = get_bits_long(&s->gb, 32);
2084  id2 = get_bits(&s->gb, 24);
2085  len -= 7;
2086  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2087  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2088  goto out;
2089  }
2090 
2091  skip_bits(&s->gb, 8);
2092  seqno = get_bits(&s->gb, 8);
2093  len -= 2;
2094  if (seqno == 0) {
2095  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2096  goto out;
2097  }
2098 
2099  nummarkers = get_bits(&s->gb, 8);
2100  len -= 1;
2101  if (nummarkers == 0) {
2102  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2103  goto out;
2104  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2105  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2106  goto out;
2107  } else if (seqno > nummarkers) {
2108  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2109  goto out;
2110  }
2111 
2112  /* Allocate if this is the first APP2 we've seen. */
2113  if (s->iccnum == 0) {
2114  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2115  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2116  return AVERROR(ENOMEM);
2117  }
2118  s->iccnum = nummarkers;
2119  }
2120 
2121  if (s->iccentries[seqno - 1].data) {
2122  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2123  goto out;
2124  }
2125 
2126  s->iccentries[seqno - 1].length = len;
2127  s->iccentries[seqno - 1].data = av_malloc(len);
2128  if (!s->iccentries[seqno - 1].data) {
2129  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2130  return AVERROR(ENOMEM);
2131  }
2132 
2133  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2134  skip_bits(&s->gb, len << 3);
2135  len = 0;
2136  s->iccread++;
2137 
2138  if (s->iccread > s->iccnum)
2139  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2140  }
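 /* ICC_PROFILE APP2: a profile may be split across several APP2 markers,
  * each carrying a 1-based sequence number and the total marker count.
  * The chunks are stored here and concatenated in sequence order into
  * AV_FRAME_DATA_ICC_PROFILE side data once all of them have been read. */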
2141 
2142 out:
2143  /* slow but needed for extreme adobe jpegs */
2144  if (len < 0)
2145  av_log(s->avctx, AV_LOG_ERROR,
2146  "mjpeg: error, decode_app parser read over the end\n");
2147  while (len-- > 0)
2148  skip_bits(&s->gb, 8);
2149 
2150  return 0;
2151 }
2152 
2153 static int mjpeg_decode_com(MJpegDecodeContext *s)
2154 {
2155  int len = get_bits(&s->gb, 16);
2156  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2157  int i;
2158  char *cbuf = av_malloc(len - 1);
2159  if (!cbuf)
2160  return AVERROR(ENOMEM);
2161 
2162  for (i = 0; i < len - 2; i++)
2163  cbuf[i] = get_bits(&s->gb, 8);
2164  if (i > 0 && cbuf[i - 1] == '\n')
2165  cbuf[i - 1] = 0;
2166  else
2167  cbuf[i] = 0;
2168 
2169  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2170  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2171 
2172  /* buggy avid, it puts EOI only at every 10th frame */
2173  if (!strncmp(cbuf, "AVID", 4)) {
2174  parse_avid(s, cbuf, len);
2175  } else if (!strcmp(cbuf, "CS=ITU601"))
2176  s->cs_itu601 = 1;
2177  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2178  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2179  s->flipped = 1;
2180  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2181  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2182  s->multiscope = 2;
2183  }
2184 
2185  av_free(cbuf);
2186  }
2187 
2188  return 0;
2189 }
2190 
2191 /* return the 8 bit start code value and update the search
2192  state. Return -1 if no start code found */
2193 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2194 {
2195  const uint8_t *buf_ptr;
2196  unsigned int v, v2;
2197  int val;
2198  int skipped = 0;
2199 
2200  buf_ptr = *pbuf_ptr;
2201  while (buf_end - buf_ptr > 1) {
2202  v = *buf_ptr++;
2203  v2 = *buf_ptr;
2204  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2205  val = *buf_ptr++;
2206  goto found;
2207  }
2208  skipped++;
2209  }
2210  buf_ptr = buf_end;
2211  val = -1;
2212 found:
2213  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2214  *pbuf_ptr = buf_ptr;
2215  return val;
2216 }
2217 
2218 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2219  const uint8_t **buf_ptr, const uint8_t *buf_end,
2220  const uint8_t **unescaped_buf_ptr,
2221  int *unescaped_buf_size)
2222 {
2223  int start_code;
2224  start_code = find_marker(buf_ptr, buf_end);
2225 
2226  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2227  if (!s->buffer)
2228  return AVERROR(ENOMEM);
2229 
2230  /* unescape buffer of SOS, use special treatment for JPEG-LS */
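 /* Baseline/sequential JPEG inserts a 0x00 stuffing byte after every 0xFF
  * inside the entropy-coded data; the loop below drops those stuffing
  * bytes, keeps restart markers (RST0..RST7) and stops at the first other
  * marker. JPEG-LS instead stuffs a single 0 bit after each 0xFF, so its
  * data is rewritten bit-wise further down, emitting only 7 bits for the
  * byte that follows a 0xFF. */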
2231  if (start_code == SOS && !s->ls) {
2232  const uint8_t *src = *buf_ptr;
2233  const uint8_t *ptr = src;
2234  uint8_t *dst = s->buffer;
2235 
2236  #define copy_data_segment(skip) do { \
2237  ptrdiff_t length = (ptr - src) - (skip); \
2238  if (length > 0) { \
2239  memcpy(dst, src, length); \
2240  dst += length; \
2241  src = ptr; \
2242  } \
2243  } while (0)
2244 
2245  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2246  ptr = buf_end;
2247  copy_data_segment(0);
2248  } else {
2249  while (ptr < buf_end) {
2250  uint8_t x = *(ptr++);
2251 
2252  if (x == 0xff) {
2253  ptrdiff_t skip = 0;
2254  while (ptr < buf_end && x == 0xff) {
2255  x = *(ptr++);
2256  skip++;
2257  }
2258 
2259  /* 0xFF, 0xFF, ... */
2260  if (skip > 1) {
2261  copy_data_segment(skip);
2262 
2263  /* decrement src as it is equal to ptr after the
2264  * copy_data_segment macro and we might want to
2265  * copy the current value of x later on */
2266  src--;
2267  }
2268 
2269  if (x < RST0 || x > RST7) {
2270  copy_data_segment(1);
2271  if (x)
2272  break;
2273  }
2274  }
2275  }
2276  if (src < ptr)
2277  copy_data_segment(0);
2278  }
2279  #undef copy_data_segment
2280 
2281  *unescaped_buf_ptr = s->buffer;
2282  *unescaped_buf_size = dst - s->buffer;
2283  memset(s->buffer + *unescaped_buf_size, 0,
2284  AV_INPUT_BUFFER_PADDING_SIZE);
2285 
2286  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2287  (buf_end - *buf_ptr) - (dst - s->buffer));
2288  } else if (start_code == SOS && s->ls) {
2289  const uint8_t *src = *buf_ptr;
2290  uint8_t *dst = s->buffer;
2291  int bit_count = 0;
2292  int t = 0, b = 0;
2293  PutBitContext pb;
2294 
2295  /* find marker */
2296  while (src + t < buf_end) {
2297  uint8_t x = src[t++];
2298  if (x == 0xff) {
2299  while ((src + t < buf_end) && x == 0xff)
2300  x = src[t++];
2301  if (x & 0x80) {
2302  t -= FFMIN(2, t);
2303  break;
2304  }
2305  }
2306  }
2307  bit_count = t * 8;
2308  init_put_bits(&pb, dst, t);
2309 
2310  /* unescape bitstream */
2311  while (b < t) {
2312  uint8_t x = src[b++];
2313  put_bits(&pb, 8, x);
2314  if (x == 0xFF && b < t) {
2315  x = src[b++];
2316  if (x & 0x80) {
2317  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2318  x &= 0x7f;
2319  }
2320  put_bits(&pb, 7, x);
2321  bit_count--;
2322  }
2323  }
2324  flush_put_bits(&pb);
2325 
2326  *unescaped_buf_ptr = dst;
2327  *unescaped_buf_size = (bit_count + 7) >> 3;
2328  memset(s->buffer + *unescaped_buf_size, 0,
2329  AV_INPUT_BUFFER_PADDING_SIZE);
2330  } else {
2331  *unescaped_buf_ptr = *buf_ptr;
2332  *unescaped_buf_size = buf_end - *buf_ptr;
2333  }
2334 
2335  return start_code;
2336 }
2337 
2338 static void reset_icc_profile(MJpegDecodeContext *s)
2339 {
2340  int i;
2341 
2342  if (s->iccentries) {
2343  for (i = 0; i < s->iccnum; i++)
2344  av_freep(&s->iccentries[i].data);
2345  av_freep(&s->iccentries);
2346  }
2347 
2348  s->iccread = 0;
2349  s->iccnum = 0;
2350 }
2351 
2352 int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame,
2353  int *got_frame, const AVPacket *avpkt,
2354  const uint8_t *buf, const int buf_size)
2355 {
2356  MJpegDecodeContext *s = avctx->priv_data;
2357  const uint8_t *buf_end, *buf_ptr;
2358  const uint8_t *unescaped_buf_ptr;
2359  int hshift, vshift;
2360  int unescaped_buf_size;
2361  int start_code;
2362  int index;
2363  int ret = 0;
2364  int is16bit;
2365 
2366  s->force_pal8 = 0;
2367 
2368  s->buf_size = buf_size;
2369 
2370  av_exif_free(&s->exif_metadata);
2371  av_freep(&s->stereo3d);
2372  s->adobe_transform = -1;
2373 
2374  if (s->iccnum != 0)
2375  reset_icc_profile(s);
2376 
2377 redo_for_pal8:
2378  buf_ptr = buf;
2379  buf_end = buf + buf_size;
2380  while (buf_ptr < buf_end) {
2381  /* find start next marker */
2382  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2383  &unescaped_buf_ptr,
2384  &unescaped_buf_size);
2385  /* EOF */
2386  if (start_code < 0) {
2387  break;
2388  } else if (unescaped_buf_size > INT_MAX / 8) {
2389  av_log(avctx, AV_LOG_ERROR,
2390  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2391  start_code, unescaped_buf_size, buf_size);
2392  return AVERROR_INVALIDDATA;
2393  }
2394  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2395  start_code, buf_end - buf_ptr);
2396 
2397  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2398 
2399  if (ret < 0) {
2400  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2401  goto fail;
2402  }
2403 
2404  s->start_code = start_code;
2405  if (avctx->debug & FF_DEBUG_STARTCODE)
2406  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2407 
2408  /* process markers */
2409  if (start_code >= RST0 && start_code <= RST7) {
2410  av_log(avctx, AV_LOG_DEBUG,
2411  "restart marker: %d\n", start_code & 0x0f);
2412  /* APP fields */
2413  } else if (start_code >= APP0 && start_code <= APP15) {
2414  if ((ret = mjpeg_decode_app(s)) < 0)
2415  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2416  av_err2str(ret));
2417  /* Comment */
2418  } else if (start_code == COM) {
2419  ret = mjpeg_decode_com(s);
2420  if (ret < 0)
2421  return ret;
2422  } else if (start_code == DQT) {
2423  ret = ff_mjpeg_decode_dqt(s);
2424  if (ret < 0)
2425  return ret;
2426  }
2427 
2428  ret = -1;
2429 
2430  if (!CONFIG_JPEGLS_DECODER &&
2431  (start_code == SOF48 || start_code == LSE)) {
2432  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2433  return AVERROR(ENOSYS);
2434  }
2435 
2436  if (avctx->skip_frame == AVDISCARD_ALL) {
2437  switch(start_code) {
2438  case SOF0:
2439  case SOF1:
2440  case SOF2:
2441  case SOF3:
2442  case SOF48:
2443  break;
2444  default:
2445  goto skip;
2446  }
2447  }
2448 
2449  switch (start_code) {
2450  case SOI:
2451  s->restart_interval = 0;
2452  s->restart_count = 0;
2453  s->raw_image_buffer = buf_ptr;
2454  s->raw_image_buffer_size = buf_end - buf_ptr;
2455  /* nothing to do on SOI */
2456  break;
2457  case DHT:
2458  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2459  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2460  goto fail;
2461  }
2462  break;
2463  case SOF0:
2464  case SOF1:
2465  if (start_code == SOF0)
2466  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2467  else
2468  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2469  s->lossless = 0;
2470  s->ls = 0;
2471  s->progressive = 0;
2472  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2473  goto fail;
2474  break;
2475  case SOF2:
2476  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2477  s->lossless = 0;
2478  s->ls = 0;
2479  s->progressive = 1;
2480  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2481  goto fail;
2482  break;
2483  case SOF3:
2484  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2485 #if FF_API_CODEC_PROPS
2486 FF_DISABLE_DEPRECATION_WARNINGS
2487  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2488 FF_ENABLE_DEPRECATION_WARNINGS
2489 #endif
2490  s->lossless = 1;
2491  s->ls = 0;
2492  s->progressive = 0;
2493  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2494  goto fail;
2495  break;
2496  case SOF48:
2497  avctx->profile = AV_PROFILE_MJPEG_JPEG_LS;
2498 #if FF_API_CODEC_PROPS
2499 FF_DISABLE_DEPRECATION_WARNINGS
2500  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2501 FF_ENABLE_DEPRECATION_WARNINGS
2502 #endif
2503  s->lossless = 1;
2504  s->ls = 1;
2505  s->progressive = 0;
2506  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2507  goto fail;
2508  break;
2509  case LSE:
2510  if (!CONFIG_JPEGLS_DECODER ||
2511  (ret = ff_jpegls_decode_lse(s)) < 0)
2512  goto fail;
2513  if (ret == 1)
2514  goto redo_for_pal8;
2515  break;
2516  case EOI:
2517 eoi_parser:
2518  if (!avctx->hwaccel &&
2519  s->progressive && s->cur_scan && s->got_picture)
2520  mjpeg_idct_scan_progressive_ac(s);
2521  s->cur_scan = 0;
2522  if (!s->got_picture) {
2523  av_log(avctx, AV_LOG_WARNING,
2524  "Found EOI before any SOF, ignoring\n");
2525  break;
2526  }
2527  if (s->interlaced) {
2528  s->bottom_field ^= 1;
2529  /* if not bottom field, do not output image yet */
2530  if (s->bottom_field == !s->interlace_polarity)
2531  break;
2532  }
2533  if (avctx->hwaccel) {
2534  ret = FF_HW_SIMPLE_CALL(avctx, end_frame);
2535  if (ret < 0)
2536  return ret;
2537 
2538  av_freep(&s->hwaccel_picture_private);
2539  }
2540  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2541  return ret;
2542  if (s->lossless)
2543  frame->flags |= AV_FRAME_FLAG_LOSSLESS;
2544  *got_frame = 1;
2545  s->got_picture = 0;
2546 
2547  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2548  int qp = FFMAX3(s->qscale[0],
2549  s->qscale[1],
2550  s->qscale[2]);
2551 
2552  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2553  }
2554 
2555  goto the_end;
2556  case SOS:
2557  s->raw_scan_buffer = buf_ptr;
2558  s->raw_scan_buffer_size = buf_end - buf_ptr;
2559 
2560  s->cur_scan++;
2561 
2562  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2563  (avctx->err_recognition & AV_EF_EXPLODE))
2564  goto fail;
2565  break;
2566  case DRI:
2567  if ((ret = mjpeg_decode_dri(s)) < 0)
2568  return ret;
2569  break;
2570  case SOF5:
2571  case SOF6:
2572  case SOF7:
2573  case SOF9:
2574  case SOF10:
2575  case SOF11:
2576  case SOF13:
2577  case SOF14:
2578  case SOF15:
2579  case JPG:
2580  av_log(avctx, AV_LOG_ERROR,
2581  "mjpeg: unsupported coding type (%x)\n", start_code);
2582  break;
2583  }
2584 
2585  if (avctx->skip_frame == AVDISCARD_ALL) {
2586  switch(start_code) {
2587  case SOF0:
2588  case SOF1:
2589  case SOF2:
2590  case SOF3:
2591  case SOF48:
2592  s->got_picture = 0;
2593  goto the_end_no_picture;
2594  }
2595  }
2596 
2597 skip:
2598  /* eof process start code */
2599  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2600  av_log(avctx, AV_LOG_DEBUG,
2601  "marker parser used %d bytes (%d bits)\n",
2602  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2603  }
2604  if (s->got_picture && s->cur_scan) {
2605  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2606  goto eoi_parser;
2607  }
2608  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2609  return AVERROR_INVALIDDATA;
2610 fail:
2611  s->got_picture = 0;
2612  return ret;
2613 the_end:
2614 
2615  is16bit = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].step > 1;
2616 
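 /* Some planes are decoded at a lower horizontal/vertical resolution than
  * the selected output pixel format provides; the two blocks below stretch
  * such planes in place, working from right to left and bottom to top so
  * that already decoded samples are not overwritten before they are read,
  * using linear interpolation between neighbouring samples. */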
2617  if (AV_RB32(s->upscale_h)) {
2618  int p;
2619  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2620  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2621  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2622  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2623  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2624  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2625  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2626  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2627  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2628  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2629  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2630  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2631  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2632  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2633  );
2634  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2635  if (ret)
2636  return ret;
2637 
2638  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2639  for (p = 0; p<s->nb_components; p++) {
2640  uint8_t *line = s->picture_ptr->data[p];
2641  int w = s->width;
2642  int h = s->height;
2643  if (!s->upscale_h[p])
2644  continue;
2645  if (p==1 || p==2) {
2646  w = AV_CEIL_RSHIFT(w, hshift);
2647  h = AV_CEIL_RSHIFT(h, vshift);
2648  }
2649  if (s->upscale_v[p] == 1)
2650  h = (h+1)>>1;
2651  av_assert0(w > 0);
2652  for (int i = 0; i < h; i++) {
2653  if (s->upscale_h[p] == 1) {
2654  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2655  else line[w - 1] = line[(w - 1) / 2];
2656  for (index = w - 2; index > 0; index--) {
2657  if (is16bit)
2658  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2659  else
2660  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2661  }
2662  } else if (s->upscale_h[p] == 2) {
2663  if (is16bit) {
2664  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2665  if (w > 1)
2666  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2667  } else {
2668  line[w - 1] = line[(w - 1) / 3];
2669  if (w > 1)
2670  line[w - 2] = line[w - 1];
2671  }
2672  for (index = w - 3; index > 0; index--) {
2673  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2674  }
2675  } else if (s->upscale_h[p] == 4){
2676  if (is16bit) {
2677  uint16_t *line16 = (uint16_t *) line;
2678  line16[w - 1] = line16[(w - 1) >> 2];
2679  if (w > 1)
2680  line16[w - 2] = (line16[(w - 1) >> 2] * 3 + line16[(w - 2) >> 2]) >> 2;
2681  if (w > 2)
2682  line16[w - 3] = (line16[(w - 1) >> 2] + line16[(w - 2) >> 2]) >> 1;
2683  } else {
2684  line[w - 1] = line[(w - 1) >> 2];
2685  if (w > 1)
2686  line[w - 2] = (line[(w - 1) >> 2] * 3 + line[(w - 2) >> 2]) >> 2;
2687  if (w > 2)
2688  line[w - 3] = (line[(w - 1) >> 2] + line[(w - 2) >> 2]) >> 1;
2689  }
2690  for (index = w - 4; index > 0; index--)
2691  line[index] = (line[(index + 3) >> 2] + line[(index + 2) >> 2]
2692  + line[(index + 1) >> 2] + line[index >> 2]) >> 2;
2693  }
2694  line += s->linesize[p];
2695  }
2696  }
2697  }
2698  if (AV_RB32(s->upscale_v)) {
2699  int p;
2700  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2701  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2702  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2703  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2704  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2705  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2706  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2707  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2708  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2709  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2710  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2711  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2712  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2713  );
2714  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2715  if (ret)
2716  return ret;
2717 
2718  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2719  for (p = 0; p < s->nb_components; p++) {
2720  uint8_t *dst;
2721  int w = s->width;
2722  int h = s->height;
2723  if (!s->upscale_v[p])
2724  continue;
2725  if (p==1 || p==2) {
2726  w = AV_CEIL_RSHIFT(w, hshift);
2727  h = AV_CEIL_RSHIFT(h, vshift);
2728  }
2729  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2730  for (int i = h - 1; i; i--) {
2731  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2732  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2733  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2734  memcpy(dst, src1, w);
2735  } else {
2736  for (index = 0; index < w; index++)
2737  dst[index] = (src1[index] + src2[index]) >> 1;
2738  }
2739  dst -= s->linesize[p];
2740  }
2741  }
2742  }
2743  if (s->flipped && !s->rgb) {
2744  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2745  if (ret)
2746  return ret;
2747 
2748  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
2749  for (index=0; index<s->nb_components; index++) {
2750  int h = frame->height;
2751  if (index && index < 3)
2752  h = AV_CEIL_RSHIFT(h, vshift);
2753  if (frame->data[index]) {
2754  frame->data[index] += (h - 1) * frame->linesize[index];
2755  frame->linesize[index] *= -1;
2756  }
2757  }
2758  }
2759 
2760  if (avctx->pix_fmt == AV_PIX_FMT_GBRP) {
2761  av_assert0(s->nb_components == 3);
2762  FFSWAP(uint8_t *, frame->data[0], frame->data[2]);
2763  FFSWAP(uint8_t *, frame->data[0], frame->data[1]);
2764  FFSWAP(int, frame->linesize[0], frame->linesize[2]);
2765  FFSWAP(int, frame->linesize[0], frame->linesize[1]);
2766  }
2767 
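 /* For 4-component Adobe images the two blocks below fold the fourth
  * plane into the colour planes (multiplying each sample by it and
  * rescaling) before forcing the alpha plane to fully opaque, converting
  * the CMYK/YCCK-style data that Adobe transform values 0 and 2 indicate
  * into the selected GBRAP/YUVA444P output. x * 257 >> 16 is used as a
  * cheap integer approximation of x / 255. */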
2768  if (s->adobe_transform == 0 && avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2769  int w = s->picture_ptr->width;
2770  int h = s->picture_ptr->height;
2771  av_assert0(s->nb_components == 4);
2772  for (int i = 0; i < h; i++) {
2773  int j;
2774  uint8_t *dst[4];
2775  for (index=0; index<4; index++) {
2776  dst[index] = s->picture_ptr->data[index]
2777  + s->picture_ptr->linesize[index]*i;
2778  }
2779  for (j=0; j<w; j++) {
2780  int k = dst[3][j];
2781  int r = dst[0][j] * k;
2782  int g = dst[1][j] * k;
2783  int b = dst[2][j] * k;
2784  dst[0][j] = g*257 >> 16;
2785  dst[1][j] = b*257 >> 16;
2786  dst[2][j] = r*257 >> 16;
2787  }
2788  memset(dst[3], 255, w);
2789  }
2790  }
2791  if (s->adobe_transform == 2 && avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2792  int w = s->picture_ptr->width;
2793  int h = s->picture_ptr->height;
2794  av_assert0(s->nb_components == 4);
2795  for (int i = 0; i < h; i++) {
2796  int j;
2797  uint8_t *dst[4];
2798  for (index=0; index<4; index++) {
2799  dst[index] = s->picture_ptr->data[index]
2800  + s->picture_ptr->linesize[index]*i;
2801  }
2802  for (j=0; j<w; j++) {
2803  int k = dst[3][j];
2804  int r = (255 - dst[0][j]) * k;
2805  int g = (128 - dst[1][j]) * k;
2806  int b = (128 - dst[2][j]) * k;
2807  dst[0][j] = r*257 >> 16;
2808  dst[1][j] = (g*257 >> 16) + 128;
2809  dst[2][j] = (b*257 >> 16) + 128;
2810  }
2811  memset(dst[3], 255, w);
2812  }
2813  }
2814 
2815  if (s->stereo3d) {
2816  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2817  if (stereo) {
2818  stereo->type = s->stereo3d->type;
2819  stereo->flags = s->stereo3d->flags;
2820  }
2821  av_freep(&s->stereo3d);
2822  }
2823 
2824  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2825  AVFrameSideData *sd;
2826  size_t offset = 0;
2827  int total_size = 0;
2828 
2829  /* Sum size of all parts. */
2830  for (int i = 0; i < s->iccnum; i++)
2831  total_size += s->iccentries[i].length;
2832 
2833  ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE, total_size, &sd);
2834  if (ret < 0) {
2835  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2836  return ret;
2837  }
2838 
2839  if (sd) {
2840  /* Reassemble the parts, which are now in-order. */
2841  for (int i = 0; i < s->iccnum; i++) {
2842  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2843  offset += s->iccentries[i].length;
2844  }
2845  }
2846  }
2847 
2848  if (s->exif_metadata.entries) {
2849  ret = ff_decode_exif_attach_ifd(avctx, frame, &s->exif_metadata);
2850  av_exif_free(&s->exif_metadata);
2851  if (ret < 0)
2852  av_log(avctx, AV_LOG_WARNING, "couldn't attach EXIF metadata\n");
2853  }
2854 
2855  if (avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
2856  (avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2857  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2858  avctx->coded_height > s->orig_height) {
2859  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2860  frame->crop_top = frame->height - avctx->height;
2861  }
2862 
2863 the_end_no_picture:
2864  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2865  buf_end - buf_ptr);
2866  return buf_ptr - buf;
2867 }
2868 
2869 int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame,
2870  AVPacket *avpkt)
2871 {
2872  return ff_mjpeg_decode_frame_from_buf(avctx, frame, got_frame,
2873  avpkt, avpkt->data, avpkt->size);
2874 }
2875 
2876 
2877 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2878  * even without having called ff_mjpeg_decode_init(). */
2879 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2880 {
2881  MJpegDecodeContext *s = avctx->priv_data;
2882  int i, j;
2883 
2884  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
2885  av_log(avctx, AV_LOG_INFO, "Single field\n");
2886  }
2887 
2888  av_frame_free(&s->picture);
2889  s->picture_ptr = NULL;
2890 
2891  av_frame_free(&s->smv_frame);
2892 
2893  av_freep(&s->buffer);
2894  av_freep(&s->stereo3d);
2895  av_freep(&s->ljpeg_buffer);
2896  s->ljpeg_buffer_size = 0;
2897 
2898  for (i = 0; i < 3; i++) {
2899  for (j = 0; j < 4; j++)
2900  ff_vlc_free(&s->vlcs[i][j]);
2901  }
2902  for (i = 0; i < MAX_COMPONENTS; i++) {
2903  av_freep(&s->blocks[i]);
2904  av_freep(&s->last_nnz[i]);
2905  }
2906  av_exif_free(&s->exif_metadata);
2907 
2907 
2908  reset_icc_profile(s);
2909 
2910  av_freep(&s->hwaccel_picture_private);
2911  av_freep(&s->jls_state);
2912 
2913  return 0;
2914 }
2915 
2916 static void decode_flush(AVCodecContext *avctx)
2917 {
2918  MJpegDecodeContext *s = avctx->priv_data;
2919  s->got_picture = 0;
2920 
2921  s->smv_next_frame = 0;
2922  av_frame_unref(s->smv_frame);
2923 }
2924 
2925 #if CONFIG_MJPEG_DECODER
2926 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2927 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2928 static const AVOption options[] = {
2929  { "extern_huff", "Use external huffman table.",
2930  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2931  { NULL },
2932 };
2933 
2934 static const AVClass mjpegdec_class = {
2935  .class_name = "MJPEG decoder",
2936  .item_name = av_default_item_name,
2937  .option = options,
2938  .version = LIBAVUTIL_VERSION_INT,
2939 };
2940 
2941 const FFCodec ff_mjpeg_decoder = {
2942  .p.name = "mjpeg",
2943  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
2944  .p.type = AVMEDIA_TYPE_VIDEO,
2945  .p.id = AV_CODEC_ID_MJPEG,
2946  .priv_data_size = sizeof(MJpegDecodeContext),
2947  .init = ff_mjpeg_decode_init,
2948  .close = ff_mjpeg_decode_end,
2949  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
2950  .flush = decode_flush,
2951  .p.capabilities = AV_CODEC_CAP_DR1,
2952  .p.max_lowres = 3,
2953  .p.priv_class = &mjpegdec_class,
2954  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
2955  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
2958  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2959 #if CONFIG_MJPEG_NVDEC_HWACCEL
2960  HWACCEL_NVDEC(mjpeg),
2961 #endif
2962 #if CONFIG_MJPEG_VAAPI_HWACCEL
2963  HWACCEL_VAAPI(mjpeg),
2964 #endif
2965  NULL
2966  },
2967 };
2968 #endif
2969 #if CONFIG_THP_DECODER
2970 const FFCodec ff_thp_decoder = {
2971  .p.name = "thp",
2972  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
2973  .p.type = AVMEDIA_TYPE_VIDEO,
2974  .p.id = AV_CODEC_ID_THP,
2975  .priv_data_size = sizeof(MJpegDecodeContext),
2976  .init = ff_mjpeg_decode_init,
2977  .close = ff_mjpeg_decode_end,
2978  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
2979  .flush = decode_flush,
2980  .p.capabilities = AV_CODEC_CAP_DR1,
2981  .p.max_lowres = 3,
2982  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2983 };
2984 #endif
2985 
2986 #if CONFIG_SMVJPEG_DECODER
2987 // SMV JPEG just stacks several output frames into one JPEG picture
2988 // we handle that by setting up the cropping parameters appropriately
2989 static void smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
2990 {
2991  MJpegDecodeContext *s = avctx->priv_data;
2992 
2993  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
2994 
2995  frame->width = avctx->coded_width;
2996  frame->height = avctx->coded_height;
2997  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
2998  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
2999 
3000  if (s->smv_frame->pts != AV_NOPTS_VALUE)
3001  s->smv_frame->pts += s->smv_frame->duration;
3002  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
3003 
3004  if (s->smv_next_frame == 0)
3005  av_frame_unref(s->smv_frame);
3006 }
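 /* SMV JPEG: each decoded JPEG picture holds smv_frames_per_jpeg output
  * frames stacked vertically; crop_top/crop_bottom select the current
  * sub-frame, the cached frame's pts is advanced by one frame duration,
  * and the cached picture is released once the last sub-frame is output. */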
3007 
3008 static int smvjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
3009 {
3010  MJpegDecodeContext *s = avctx->priv_data;
3011  AVPacket *const pkt = avctx->internal->in_pkt;
3012  int got_frame = 0;
3013  int ret;
3014 
3015  if (s->smv_next_frame > 0)
3016  goto return_frame;
3017 
3018  ret = ff_decode_get_packet(avctx, pkt);
3019  if (ret < 0)
3020  return ret;
3021 
3022  av_frame_unref(s->smv_frame);
3023 
3024  ret = ff_mjpeg_decode_frame(avctx, s->smv_frame, &got_frame, pkt);
3025  s->smv_frame->pkt_dts = pkt->dts;
3026  av_packet_unref(pkt);
3027  if (ret < 0)
3028  return ret;
3029 
3030  if (!got_frame)
3031  return AVERROR(EAGAIN);
3032 
3033  // packet duration covers all the frames in the packet
3034  s->smv_frame->duration /= s->smv_frames_per_jpeg;
3035 
3036 return_frame:
3037  av_assert0(s->smv_frame->buf[0]);
3038  ret = av_frame_ref(frame, s->smv_frame);
3039  if (ret < 0)
3040  return ret;
3041 
3042  smv_process_frame(avctx, frame);
3043  return 0;
3044 }
3045 
3046 const FFCodec ff_smvjpeg_decoder = {
3047  .p.name = "smvjpeg",
3048  CODEC_LONG_NAME("SMV JPEG"),
3049  .p.type = AVMEDIA_TYPE_VIDEO,
3050  .p.id = AV_CODEC_ID_SMVJPEG,
3051  .priv_data_size = sizeof(MJpegDecodeContext),
3052  .init = ff_mjpeg_decode_init,
3053  .close = ff_mjpeg_decode_end,
3054  FF_CODEC_RECEIVE_FRAME_CB(smvjpeg_receive_frame),
3055  .flush = decode_flush,
3056  .p.capabilities = AV_CODEC_CAP_DR1,
3057  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3059 };
3060 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
flags
const SwsFlags flags[]
Definition: swscale.c:61
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:433
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1405
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:276
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:245
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:208
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:493
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:689
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:360
opt.h
av_exif_parse_buffer
int av_exif_parse_buffer(void *logctx, const uint8_t *buf, size_t size, AVExifMetadata *ifd, enum AVExifHeaderMode header_mode)
Decodes the EXIF data provided in the buffer and writes it into the struct *ifd.
Definition: exif.c:767
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:659
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1203
out
FILE * out
Definition: movenc.c:55
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1426
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
APP1
@ APP1
Definition: mjpeg.h:80
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3447
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:978
SOF0
@ SOF0
Definition: mjpeg.h:39
src1
const pixel * src1
Definition: h264pred_template.c:420
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1398
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:568
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:419
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:260
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:250
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:113
mask
int mask
Definition: mediacodecdec_common.c:154
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:230
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:224
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:595
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:767
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:558
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:682
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:42
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:149
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:127
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:209
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:152
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:512
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1375
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:655
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:247
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:379
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:135
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3487
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:696
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:333
rgb
Definition: rpzaenc.c:60
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:238
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1269
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1442
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
FFHWAccel
Definition: hwaccel_internal.h:34
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:122
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1662
fail
#define fail()
Definition: checkasm.h:206
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:597
SOF3
@ SOF3
Definition: mjpeg.h:42
GetBitContext
Definition: get_bits.h:109
ff_mjpeg_decode_frame_from_buf
int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *avpkt, const uint8_t *buf, const int buf_size)
Definition: mjpegdec.c:2352
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2153
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:59
av_exif_free
void av_exif_free(AVExifMetadata *ifd)
Frees all resources associated with the given EXIF metadata struct.
Definition: exif.c:612
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3475
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:607
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:522
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:283
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:191
aligned
static int aligned(int val)
Definition: dashdec.c:171
avassert.h
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:881
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:551
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:539
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1638
AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: defs.h:173
COM
@ COM
Definition: mjpeg.h:111
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: defs.h:212
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1061
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:185
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:515
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:346
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:102
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:552
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:355
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:298
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:550
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2338
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2879
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:331
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:212
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:561
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:529
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_decode_exif_attach_ifd
int ff_decode_exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd)
Definition: decode.c:2412
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
av_clip_int16
#define av_clip_int16
Definition: common.h:115
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:530
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1633
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:207
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
hwaccel_internal.h
AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: defs.h:174
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:195
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
Definition: mjpegdec.c:819
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:241
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:386
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
options
Definition: swscale.c:43
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:231
MJpegDecodeContext
Definition: mjpegdec.h:55
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1457
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:996
lowres
static int lowres
Definition: ffplay.c:330
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1578
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:646
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1697
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:506
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1720
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1096
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
AVPacket::size
int size
Definition: packet.h:559
dc
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
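A minimal sketch of the reference-counting pattern around av_frame_ref()/av_frame_unref(); share_frame() is an illustrative name, not part of this decoder:
#include <libavutil/frame.h>
#include <libavutil/error.h>

static int share_frame(const AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();
    int ret;

    if (!dst)
        return AVERROR(ENOMEM);
    ret = av_frame_ref(dst, src);   /* new reference, no deep copy of the pixel data */
    if (ret < 0) {
        av_frame_free(&dst);
        return ret;
    }
    /* ... use dst ... */
    av_frame_unref(dst);            /* drop the reference and reset the fields */
    av_frame_free(&dst);
    return 0;
}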
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2869
av_bswap32
#define av_bswap32
Definition: bswap.h:47
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:898
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: error.h:122
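Because av_err2str() expands to a compound literal, it is only meant to be used inside a full expression such as a log call; a minimal sketch, with AVERROR(EINVAL) standing in for any failing call and log_failure() an illustrative name:
#include <libavutil/error.h>
#include <libavutil/log.h>

static void log_failure(void *log_ctx)
{
    int ret = AVERROR(EINVAL);   /* stand-in for the result of any failing call */
    av_log(log_ctx, AV_LOG_ERROR, "operation failed: %s\n", av_err2str(ret));
}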
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1668
AV_PROFILE_MJPEG_JPEG_LS
#define AV_PROFILE_MJPEG_JPEG_LS
Definition: defs.h:177
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:268
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2114
AV_RB32
Read a 32-bit value in big-endian byte order from a byte buffer.
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
SOF15
@ SOF15
Definition: mjpeg.h:54
AVCodecHWConfigInternal
Definition: hwconfig.h:25
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:174
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:557
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:290
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2193
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:194
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
layout
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:177
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2315
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:834
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:514
AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: defs.h:176
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:369
VD
#define VD
Definition: amfdec.c:671
src2
const pixel * src2
Definition: h264pred_template.c:421
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: defs.h:215
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1834
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:83
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc, but the buffer has an additional AV_INPUT_BUFFER_PADDING_SIZE bytes at the end which will always be 0.
Definition: utils.c:53
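A minimal sketch of the usual pattern for a scratch buffer reused across packets; ensure_buffer() and its parameters are illustrative names. On allocation failure the old buffer is freed and the pointer reset to NULL, hence the check:
#include <libavcodec/avcodec.h>
#include <libavutil/error.h>

static int ensure_buffer(uint8_t **buf, unsigned int *buf_size, int pkt_size)
{
    av_fast_padded_malloc(buf, buf_size, pkt_size);  /* reallocates only when growing */
    if (!*buf)
        return AVERROR(ENOMEM);
    /* *buf now offers pkt_size usable bytes plus zeroed AV_INPUT_BUFFER_PADDING_SIZE */
    return 0;
}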
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:63
decode_flush
static av_cold void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2916
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1382
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:107
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:256
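A minimal allocation/free sketch; DummyState and the helper names are illustrative. av_freep() is used so that the caller's pointer is reset to NULL after freeing:
#include <libavutil/mem.h>

typedef struct DummyState { int counter; } DummyState;

static DummyState *state_alloc(void)
{
    return av_mallocz(sizeof(DummyState));   /* zero-initialized, suitably aligned */
}

static void state_free(DummyState **state)
{
    av_freep(state);                         /* frees *state and sets it to NULL */
}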
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:676
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:750
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1878
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:970
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:35
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:81
frame
Definition: filter_design.txt:265
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:555
hwaccel
static const char * hwaccel
Definition: ffplay.c:353
pos
unsigned int pos
Definition: spdifenc.c:414
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1379
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
id
enum AVCodecID id
Definition: dts2pts.c:367
left
Definition: snow.txt:386
AV_RL32
Read a 32-bit value in little-endian byte order from a byte buffer.
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2218
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
OFFSET
Offset of an AVOption field within the decoder's private context structure (see the OFFSET() macro).
AVCodecContext
main external API structure.
Definition: avcodec.h:431
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:354
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:243
buffer
The frame and frame-reference mechanism is intended to avoid, as much as possible, expensive copies of frame data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:789
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
APP2
@ APP2
Definition: mjpeg.h:81
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:173
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1618
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
values
Definition: filter_design.txt:264
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1374
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:299
APP0
@ APP0
Definition: mjpeg.h:79
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:607
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1846
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:54
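A minimal sketch of attaching stereo metadata to an output frame; attach_stereo() and the chosen values are illustrative:
#include <libavutil/stereo3d.h>
#include <libavutil/error.h>

static int attach_stereo(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type  = AV_STEREO3D_TOPBOTTOM;      /* views stacked vertically */
    stereo->flags = AV_STEREO3D_FLAG_INVERT;    /* bottom view is the left eye */
    return 0;
}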
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:456
ff_mjpeg_bits_dc_luminance
const FF_VISIBILITY_PUSH_HIDDEN uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVPacket
This structure stores compressed data.
Definition: packet.h:535
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
imgutils.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1637
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:46
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
Definition: imgutils.c:318
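A minimal sketch of validating dimensions before allocating image buffers; check_dimensions() is an illustrative wrapper:
#include <libavutil/imgutils.h>

static int check_dimensions(void *log_ctx, int width, int height)
{
    int ret = av_image_check_size(width, height, 0, log_ctx);  /* 0: no extra log offset */
    if (ret < 0)
        return ret;   /* dimensions too large to address safely */
    return 0;
}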
width
#define width
Definition: dsp.h:89
AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: defs.h:175
AV_RB24
Read a 24-bit value in big-endian byte order from a byte buffer.
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
return_frame
static int return_frame(AVFilterContext *ctx, int is_second)
Definition: yadif_common.c:28
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:663
SOF6
@ SOF6
Definition: mjpeg.h:45
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:383
src
#define src
Definition: vp8dsp.c:248
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:347
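A minimal sketch combining MKTAG() with av_fourcc2str() to print a codec tag; log_codec_tag() and the example tag are illustrative:
#include <libavutil/avutil.h>
#include <libavutil/log.h>

static void log_codec_tag(unsigned int tag)
{
    av_log(NULL, AV_LOG_INFO, "codec_tag: %s\n", av_fourcc2str(tag));
}

/* e.g. log_codec_tag(MKTAG('A', 'V', 'R', 'n')); */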