FFmpeg
tiff.c
1 /*
2  * Copyright (c) 2006 Konstantin Shishkov
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * TIFF image decoder
24  * @author Konstantin Shishkov
25  */
26 
27 #include "config.h"
28 #if CONFIG_ZLIB
29 #include <zlib.h>
30 #endif
31 #if CONFIG_LZMA
32 #define LZMA_API_STATIC
33 #include <lzma.h>
34 #endif
35 
36 #include <float.h>
37 
38 #include "libavutil/attributes.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/avstring.h"
41 #include "libavutil/error.h"
42 #include "libavutil/intreadwrite.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/opt.h"
45 #include "libavutil/reverse.h"
46 #include "avcodec.h"
47 #include "bytestream.h"
48 #include "codec_internal.h"
49 #include "decode.h"
50 #include "exif_internal.h"
51 #include "faxcompr.h"
52 #include "lzw.h"
53 #include "tiff.h"
54 #include "tiff_common.h"
55 #include "tiff_data.h"
56 #include "mjpegdec.h"
57 #include "thread.h"
58 #include "get_bits.h"
59 
60 typedef struct TiffContext {
61  AVClass *class;
64 
65  /* JPEG decoding for DNG */
66  AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
67  AVPacket *jpkt; // encoded JPEG tile
68  AVFrame *jpgframe; // decoded JPEG tile
69 
71  uint16_t get_page;
73 
75  int width, height;
76  unsigned int bpp, bppcount;
77  uint32_t palette[256];
79  int le;
82  int planar;
83  int subsampling[2];
84  int fax_opts;
85  int predictor;
87  uint32_t res[4];
89  unsigned last_tag;
90 
91  int is_bayer;
93  uint8_t pattern[4];
94 
95  float analog_balance[4];
96  float as_shot_neutral[4];
97  float as_shot_white[4];
98  float color_matrix[3][4];
99  float camera_calibration[4][4];
100  float premultiply[4];
101  float black_level[4];
102 
103  unsigned white_level;
104  uint16_t dng_lut[65536];
105 
106  uint32_t sub_ifd;
107  uint16_t cur_page;
108 
110  int sot;
113 
114  /* Tile support */
115  int is_tiled;
118 
119  int is_jpeg;
120 
121  uint8_t *deinvert_buf;
123  uint8_t *yuv_line;
124  unsigned int yuv_line_size;
125 
128 
130 } TiffContext;
131 
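/* CIE XYZ coordinates of the D65 reference white; the AsShotWhiteXY values parsed below are normalized against it */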
132 static const float d65_white[3] = { 0.950456f, 1.f, 1.088754f };
133 
134 static void tiff_set_type(TiffContext *s, enum TiffType tiff_type) {
135  if (s->tiff_type < tiff_type) // Prioritize higher-valued entries
136  s->tiff_type = tiff_type;
137 }
138 
139 static void free_geotags(TiffContext *const s)
140 {
141  for (int i = 0; i < s->geotag_count; i++)
142  av_freep(&s->geotags[i].val);
143  av_freep(&s->geotags);
144  s->geotag_count = 0;
145 }
146 
147 static const char *get_geokey_name(int key)
148 {
149 #define RET_GEOKEY_STR(TYPE, array)\
150  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
151  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
152  return tiff_##array##_name_type_string + tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].offset;
153 
154  RET_GEOKEY_STR(VERT, vert);
155  RET_GEOKEY_STR(PROJ, proj);
156  RET_GEOKEY_STR(GEOG, geog);
157  RET_GEOKEY_STR(CONF, conf);
158 
159  return NULL;
160 }
161 
162 static int get_geokey_type(int key)
163 {
164 #define RET_GEOKEY_TYPE(TYPE, array)\
165  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
166  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
167  return tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].type;
168  RET_GEOKEY_TYPE(VERT, vert);
169  RET_GEOKEY_TYPE(PROJ, proj);
170  RET_GEOKEY_TYPE(GEOG, geog);
171  RET_GEOKEY_TYPE(CONF, conf);
172 
173  return AVERROR_INVALIDDATA;
174 }
175 
176 static int cmp_id_key(const void *id, const void *k)
177 {
178  return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
179 }
180 
181 static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
182 {
183  const TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
184  if(r)
185  return r->name;
186 
187  return NULL;
188 }
189 
190 static const char *get_geokey_val(int key, uint16_t val)
191 {
193  return "undefined";
195  return "User-Defined";
196 
197 #define RET_GEOKEY_VAL(TYPE, array)\
198  if (val >= TIFF_##TYPE##_OFFSET &&\
199  val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_codes))\
200  return tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET];
201 
202  switch (key) {
204  RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
205  break;
207  RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
208  break;
212  RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
213  break;
216  RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
217  break;
219  RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
220  RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
221  break;
223  RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
224  RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
225  break;
227  RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
228  break;
230  RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
231  break;
237  RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
238  break;
240  RET_GEOKEY_VAL(VERT_CS, vert_cs);
241  RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
242  break;
243 
244  }
245 
246  return NULL;
247 }
248 
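/* Join 'count' doubles into one newly allocated, 'sep'-separated string (", " if sep is NULL);
   returns NULL on overflow or allocation failure */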
249 static char *doubles2str(double *dp, int count, const char *sep)
250 {
251  int i;
252  char *ap, *ap0;
253  uint64_t component_len;
254  if (!sep) sep = ", ";
255  component_len = 24LL + strlen(sep);
256  if (count >= (INT_MAX - 1)/component_len)
257  return NULL;
258  ap = av_malloc(component_len * count + 1);
259  if (!ap)
260  return NULL;
261  ap0 = ap;
262  ap[0] = '\0';
263  for (i = 0; i < count; i++) {
264  unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
265  if(l >= component_len) {
266  av_free(ap0);
267  return NULL;
268  }
269  ap += l;
270  }
271  ap0[strlen(ap0) - strlen(sep)] = '\0';
272  return ap0;
273 }
274 
275 static int add_metadata(int count, int type,
276  const char *name, const char *sep, TiffContext *s, AVFrame *frame)
277 {
278  switch(type) {
279  case AV_TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
280  case AV_TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
281  case AV_TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
282  default : return AVERROR_INVALIDDATA;
283  };
284 }
285 
286 /**
287  * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
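 * In short: output = av_clip_uint16(lrintf((lut[value] - black_level) * scale_factor))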
288  */
289 static uint16_t av_always_inline dng_process_color16(uint16_t value,
290  const uint16_t *lut,
291  float black_level,
292  float scale_factor)
293 {
294  float value_norm;
295 
 296  // Apply the linearization lookup table
297  value = lut[value];
298 
299  // Black level subtraction
300  // Color scaling
301  value_norm = ((float)value - black_level) * scale_factor;
302 
303  value = av_clip_uint16(lrintf(value_norm));
304 
305  return value;
306 }
307 
308 static uint16_t av_always_inline dng_process_color8(uint16_t value,
309  const uint16_t *lut,
310  float black_level,
311  float scale_factor)
312 {
313  return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
314 }
315 
316 static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
317  const uint8_t *src, int src_stride, int width, int height,
318  int is_single_comp, int is_u16, int odd_line)
319 {
320  float scale_factor[4];
321  int line, col;
322 
323  if (s->is_bayer) {
324  for (int i = 0; i < 4; i++)
325  scale_factor[i] = s->premultiply[s->pattern[i]] * 65535.f / (s->white_level - s->black_level[i]);
326  } else {
327  for (int i = 0; i < 4; i++)
328  scale_factor[i] = s->premultiply[ i ] * 65535.f / (s->white_level - s->black_level[i]);
329  }
330 
331  if (is_single_comp) {
332  if (!is_u16)
333  return; /* <= 8bpp unsupported */
334 
 335  /* The decoded image is double the width and half the height we need; each input row comprises
 336  two rows of the output (the row is split in the middle). */
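 /* e.g. input row n = [ A0 .. A(w-1) | B0 .. B(w-1) ] is written to output rows 2n (the A half) and 2n+1 (the B half) */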
337  for (line = 0; line < height / 2; line++) {
338  uint16_t *dst_u16 = (uint16_t *)dst;
339  const uint16_t *src_u16 = (const uint16_t *)src;
340 
 341  /* Blit the first half of the input row to the first output row */
342  for (col = 0; col < width; col++)
343  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[col&1], scale_factor[col&1]);
344 
345  /* Advance the destination pointer by a row (source pointer remains in the same place) */
346  dst += dst_stride * sizeof(uint16_t);
347  dst_u16 = (uint16_t *)dst;
348 
 349  /* Blit the second half of the input row to the next output row */
350  for (col = 0; col < width; col++)
351  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[(col&1) + 2], scale_factor[(col&1) + 2]);
352 
353  dst += dst_stride * sizeof(uint16_t);
354  src += src_stride * sizeof(uint16_t);
355  }
356  } else {
357  /* Input and output image are the same size and the MJpeg decoder has done per-component
358  deinterleaving, so blitting here is straightforward. */
359  if (is_u16) {
360  for (line = 0; line < height; line++) {
361  uint16_t *dst_u16 = (uint16_t *)dst;
362  const uint16_t *src_u16 = (const uint16_t *)src;
363 
364  for (col = 0; col < width; col++)
365  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut,
366  s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
367  scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);
368 
369  dst += dst_stride * sizeof(uint16_t);
370  src += src_stride * sizeof(uint16_t);
371  }
372  } else {
373  for (line = 0; line < height; line++) {
374  uint8_t *dst_u8 = dst;
375  const uint8_t *src_u8 = src;
376 
377  for (col = 0; col < width; col++)
378  *dst_u8++ = dng_process_color8(*src_u8++, s->dng_lut,
379  s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
380  scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);
381 
382  dst += dst_stride;
383  src += src_stride;
384  }
385  }
386  }
387 }
388 
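/* Expand packed 1/2/4 bpp samples to one byte per pixel, expand 10/12/14 bpp samples to uint16,
   or otherwise copy/fill a run of 'width' bytes into 'dst' at 'offset' */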
 389 static void av_always_inline horizontal_fill(TiffContext *s,
 390  unsigned int bpp, uint8_t* dst,
391  int usePtr, const uint8_t *src,
392  uint8_t c, int width, int offset)
393 {
394  switch (bpp) {
395  case 1:
396  while (--width >= 0) {
397  dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
398  dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
399  dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
400  dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
401  dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
402  dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
403  dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
404  dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
405  }
406  break;
407  case 2:
408  while (--width >= 0) {
409  dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
410  dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
411  dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
412  dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
413  }
414  break;
415  case 4:
416  while (--width >= 0) {
417  dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
418  dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
419  }
420  break;
421  case 10:
422  case 12:
423  case 14: {
424  uint16_t *dst16 = (uint16_t *)dst;
425  int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
426  uint8_t shift = is_dng ? 0 : 16 - bpp;
427  GetBitContext gb;
428 
429  av_unused int ret = init_get_bits8(&gb, src, width);
430  av_assert1(ret >= 0);
431  for (int i = 0; i < s->width; i++) {
432  dst16[i] = get_bits(&gb, bpp) << shift;
433  }
434  }
435  break;
436  default:
437  if (usePtr) {
438  memcpy(dst + offset, src, width);
439  } else {
440  memset(dst + offset, c, width);
441  }
442  }
443 }
444 
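/* Bit-reverse every byte of 'src' into s->deinvert_buf (used when FillOrder is 2, i.e. s->fill_order is set) */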
445 static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
446 {
447  int i;
448 
449  av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
450  if (!s->deinvert_buf)
451  return AVERROR(ENOMEM);
452  for (i = 0; i < size; i++)
453  s->deinvert_buf[i] = ff_reverse[src[i]];
454 
455  return 0;
456 }
457 
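/* Unpack bpp-bit grayscale samples from 'src' into row 'lnum' of the 16-bit output plane */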
458 static void unpack_gray(TiffContext *s, AVFrame *p,
459  const uint8_t *src, int lnum, int width, int bpp)
460 {
461  GetBitContext gb;
462  uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
463 
464  av_unused int ret = init_get_bits8(&gb, src, width);
465  av_assert1(ret >= 0);
466 
467  for (int i = 0; i < s->width; i++) {
468  dst[i] = get_bits(&gb, bpp);
469  }
470 }
471 
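/* Scatter one packed, subsampled YCbCr strip (per block: subsampling[0] * subsampling[1] luma samples
   followed by one Cb and one Cr) into the planar picture starting at luma row 'lnum' */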
472 static void unpack_yuv(TiffContext *s, AVFrame *p,
473  const uint8_t *src, int lnum)
474 {
475  int i, j, k;
476  int w = (s->width - 1) / s->subsampling[0] + 1;
477  uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
478  uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
479  if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
480  for (i = 0; i < w; i++) {
481  for (j = 0; j < s->subsampling[1]; j++)
482  for (k = 0; k < s->subsampling[0]; k++)
483  p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
484  FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
485  *pu++ = *src++;
486  *pv++ = *src++;
487  }
488  }else{
489  for (i = 0; i < w; i++) {
490  for (j = 0; j < s->subsampling[1]; j++)
491  for (k = 0; k < s->subsampling[0]; k++)
492  p->data[0][(lnum + j) * p->linesize[0] +
493  i * s->subsampling[0] + k] = *src++;
494  *pu++ = *src++;
495  *pv++ = *src++;
496  }
497  }
498 }
499 
500 #if CONFIG_ZLIB
501 static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
502  int size)
503 {
504  z_stream zstream = { 0 };
505  int zret;
506 
507  zstream.next_in = src;
508  zstream.avail_in = size;
509  zstream.next_out = dst;
510  zstream.avail_out = *len;
511  zret = inflateInit(&zstream);
512  if (zret != Z_OK) {
513  av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
514  return zret;
515  }
516  zret = inflate(&zstream, Z_SYNC_FLUSH);
517  inflateEnd(&zstream);
518  *len = zstream.total_out;
519  return zret == Z_STREAM_END ? Z_OK : zret;
520 }
521 
522 static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
523  const uint8_t *src, int size, int width, int lines,
524  int strip_start, int is_yuv)
525 {
526  uint8_t *zbuf;
527  unsigned long outlen;
528  int ret, line;
529  outlen = width * lines;
530  zbuf = av_malloc(outlen);
531  if (!zbuf)
532  return AVERROR(ENOMEM);
533  if (s->fill_order) {
534  if ((ret = deinvert_buffer(s, src, size)) < 0) {
535  av_free(zbuf);
536  return ret;
537  }
538  src = s->deinvert_buf;
539  }
540  ret = tiff_uncompress(zbuf, &outlen, src, size);
541  if (ret != Z_OK) {
542  av_log(s->avctx, AV_LOG_ERROR,
543  "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
544  (unsigned long)width * lines, ret);
545  av_free(zbuf);
546  return AVERROR_UNKNOWN;
547  }
548  src = zbuf;
549  for (line = 0; line < lines; line++) {
550  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
551  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
552  } else {
553  memcpy(dst, src, width);
554  }
555  if (is_yuv) {
556  unpack_yuv(s, p, dst, strip_start + line);
557  line += s->subsampling[1] - 1;
558  }
559  dst += stride;
560  src += width;
561  }
562  av_free(zbuf);
563  return 0;
564 }
565 #endif
566 
567 #if CONFIG_LZMA
568 static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
569  int size)
570 {
571  lzma_stream stream = LZMA_STREAM_INIT;
572  lzma_ret ret;
573 
574  stream.next_in = src;
575  stream.avail_in = size;
576  stream.next_out = dst;
577  stream.avail_out = *len;
578  ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
579  if (ret != LZMA_OK) {
580  av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
581  return ret;
582  }
583  ret = lzma_code(&stream, LZMA_RUN);
584  lzma_end(&stream);
585  *len = stream.total_out;
586  return ret == LZMA_STREAM_END ? LZMA_OK : ret;
587 }
588 
589 static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
590  const uint8_t *src, int size, int width, int lines,
591  int strip_start, int is_yuv)
592 {
593  uint64_t outlen = width * (uint64_t)lines;
594  int ret, line;
595  uint8_t *buf = av_malloc(outlen);
596  if (!buf)
597  return AVERROR(ENOMEM);
598  if (s->fill_order) {
599  if ((ret = deinvert_buffer(s, src, size)) < 0) {
600  av_free(buf);
601  return ret;
602  }
603  src = s->deinvert_buf;
604  }
605  ret = tiff_uncompress_lzma(buf, &outlen, src, size);
606  if (ret != LZMA_OK) {
607  av_log(s->avctx, AV_LOG_ERROR,
608  "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
609  (uint64_t)width * lines, ret);
610  av_free(buf);
611  return AVERROR_UNKNOWN;
612  }
613  src = buf;
614  for (line = 0; line < lines; line++) {
615  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
616  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
617  } else {
618  memcpy(dst, src, width);
619  }
620  if (is_yuv) {
621  unpack_yuv(s, p, dst, strip_start + line);
622  line += s->subsampling[1] - 1;
623  }
624  dst += stride;
625  src += width;
626  }
627  av_free(buf);
628  return 0;
629 }
630 #endif
631 
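/* Decompress a CCITT RLE / Group 3 / Group 4 fax-coded strip, bit-reversing the input first when FillOrder requires it */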
632 static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
633  const uint8_t *src, int size, int width, int lines)
634 {
635  int line;
636  int ret;
637 
638  if (s->fill_order) {
639  if ((ret = deinvert_buffer(s, src, size)) < 0)
640  return ret;
641  src = s->deinvert_buf;
642  }
643  ret = ff_ccitt_unpack(s->avctx, src, size, dst, lines, stride,
644  s->compr, s->fax_opts);
645  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
646  for (line = 0; line < lines; line++) {
647  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
648  dst += stride;
649  }
650  return ret;
651 }
652 
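/* Decode one JPEG-compressed DNG tile or strip with the auxiliary MJPEG decoder and blit the result
   into the output frame at (dst_x, dst_y) */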
 653 static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
 654  int tile_byte_count, int dst_x, int dst_y, int w, int h)
655 {
656  TiffContext *s = avctx->priv_data;
657  uint8_t *dst_data, *src_data;
658  uint32_t dst_offset; /* offset from dst buffer in pixels */
659  int is_single_comp, is_u16, pixel_size;
660  int ret;
661 
662  if (tile_byte_count < 0 || tile_byte_count > bytestream2_get_bytes_left(&s->gb))
663  return AVERROR_INVALIDDATA;
664 
665  /* Prepare a packet and send to the MJPEG decoder */
666  av_packet_unref(s->jpkt);
667  s->jpkt->data = (uint8_t*)s->gb.buffer;
668  s->jpkt->size = tile_byte_count;
669 
670  if (s->is_bayer) {
671  MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
 672  /* We have to set this information here; there is no way to tell from a JPEG's own data
 673  whether it is a DNG-embedded image (and we need that information when decoding it). */
674  mjpegdecctx->bayer = 1;
675  }
676 
677  ret = avcodec_send_packet(s->avctx_mjpeg, s->jpkt);
678  if (ret < 0) {
679  av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
680  return ret;
681  }
682 
683  ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
684  if (ret < 0) {
685  av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
686 
687  /* Normally skip, error if explode */
688  if (avctx->err_recognition & AV_EF_EXPLODE)
689  return AVERROR_INVALIDDATA;
690  else
691  return 0;
692  }
693 
694  is_u16 = (s->bpp > 8);
695 
 696  /* Copy the decoded tile's pixels from 'jpgframe' to 'frame' (the final output buffer) */
697 
698  if (s->jpgframe->width != s->avctx_mjpeg->width ||
699  s->jpgframe->height != s->avctx_mjpeg->height ||
700  s->jpgframe->format != s->avctx_mjpeg->pix_fmt)
701  return AVERROR_INVALIDDATA;
702 
703  /* See dng_blit for explanation */
704  if (s->avctx_mjpeg->width == w * 2 &&
705  s->avctx_mjpeg->height == h / 2 &&
706  s->avctx_mjpeg->pix_fmt == AV_PIX_FMT_GRAY16LE) {
707  is_single_comp = 1;
708  } else if (s->avctx_mjpeg->width >= w &&
709  s->avctx_mjpeg->height >= h &&
710  s->avctx_mjpeg->pix_fmt == (is_u16 ? AV_PIX_FMT_GRAY16 : AV_PIX_FMT_GRAY8)
711  ) {
712  is_single_comp = 0;
713  } else
714  return AVERROR_INVALIDDATA;
715 
716  pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
717 
718  if (is_single_comp && !is_u16) {
719  av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
720  av_frame_unref(s->jpgframe);
721  return AVERROR_PATCHWELCOME;
722  }
723 
724  dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
725  dst_data = frame->data[0] + dst_offset * pixel_size;
726  src_data = s->jpgframe->data[0];
727 
728  dng_blit(s,
729  dst_data,
730  frame->linesize[0] / pixel_size,
731  src_data,
732  s->jpgframe->linesize[0] / pixel_size,
733  w,
734  h,
735  is_single_comp,
736  is_u16, 0);
737 
738  av_frame_unref(s->jpgframe);
739 
740  return 0;
741 }
742 
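/* Decode a single strip: dispatch on s->compr (deflate, LZMA, LZW, fax, raw, PackBits or DNG JPEG)
   and write 'lines' rows of output starting at row 'strip_start' */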
743 static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
744  const uint8_t *src, int size, int strip_start, int lines)
745 {
746  PutByteContext pb;
747  int c, line, pixels, code, ret;
748  const uint8_t *ssrc = src;
 749  int width = ((s->width * s->bpp) + 7) >> 3;
 750  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
 751  int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
752  (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
753  desc->nb_components >= 3;
754  int is_dng;
755 
756  if (s->planar)
757  width /= s->bppcount;
758 
759  if (size <= 0)
760  return AVERROR_INVALIDDATA;
761 
762  if (is_yuv) {
763  int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
764  s->subsampling[0] * s->subsampling[1] + 7) >> 3;
765  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
766  if (s->yuv_line == NULL) {
767  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
768  return AVERROR(ENOMEM);
769  }
770  dst = s->yuv_line;
771  stride = 0;
772 
773  width = (s->width - 1) / s->subsampling[0] + 1;
774  width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
775  av_assert0(width <= bytes_per_row);
776  av_assert0(s->bpp == 24);
777  }
778  if (s->is_bayer) {
779  av_assert0(width == (s->bpp * s->width + 7) >> 3);
780  }
781  av_assert0(!(s->is_bayer && is_yuv));
782  if (p->format == AV_PIX_FMT_GRAY12) {
783  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
784  if (s->yuv_line == NULL) {
785  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
786  return AVERROR(ENOMEM);
787  }
788  dst = s->yuv_line;
789  stride = 0;
790  }
791 
792  if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
793 #if CONFIG_ZLIB
794  return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
795  strip_start, is_yuv);
796 #else
797  av_log(s->avctx, AV_LOG_ERROR,
798  "zlib support not enabled, "
799  "deflate compression not supported\n");
800  return AVERROR(ENOSYS);
801 #endif
802  }
803  if (s->compr == TIFF_LZMA) {
804 #if CONFIG_LZMA
805  return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
806  strip_start, is_yuv);
807 #else
808  av_log(s->avctx, AV_LOG_ERROR,
809  "LZMA support not enabled\n");
810  return AVERROR(ENOSYS);
811 #endif
812  }
813  if (s->compr == TIFF_LZW) {
814  if (s->fill_order) {
815  if ((ret = deinvert_buffer(s, src, size)) < 0)
816  return ret;
817  ssrc = src = s->deinvert_buf;
818  }
819  if (size > 1 && !src[0] && (src[1]&1)) {
820  av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
821  }
822  if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
823  av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
824  return ret;
825  }
826  for (line = 0; line < lines; line++) {
827  pixels = ff_lzw_decode(s->lzw, dst, width);
828  if (pixels < width) {
829  av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
830  pixels, width);
831  return AVERROR_INVALIDDATA;
832  }
833  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
834  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
835  if (is_yuv) {
836  unpack_yuv(s, p, dst, strip_start + line);
837  line += s->subsampling[1] - 1;
838  } else if (p->format == AV_PIX_FMT_GRAY12) {
839  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
840  }
841  dst += stride;
842  }
843  return 0;
844  }
845  if (s->compr == TIFF_CCITT_RLE ||
846  s->compr == TIFF_G3 ||
847  s->compr == TIFF_G4) {
848  if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
849  return AVERROR_INVALIDDATA;
850 
851  return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
852  }
853 
854  bytestream2_init(&s->gb, src, size);
855  bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));
856 
857  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
858 
859  /* Decode JPEG-encoded DNGs with strips */
860  if (s->compr == TIFF_NEWJPEG && is_dng) {
861  if (s->strips > 1) {
862  av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strips unsupported\n");
863  return AVERROR_PATCHWELCOME;
864  }
865  if (!s->is_bayer)
866  return AVERROR_PATCHWELCOME;
867  if ((ret = dng_decode_jpeg(s->avctx, p, s->stripsize, 0, 0, s->width, s->height)) < 0)
868  return ret;
869  return 0;
870  }
871 
872  if (is_dng && stride == 0)
873  return AVERROR_INVALIDDATA;
874 
875  for (line = 0; line < lines; line++) {
876  if (src - ssrc > size) {
877  av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
878  return AVERROR_INVALIDDATA;
879  }
880 
881  if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
882  break;
883  bytestream2_seek_p(&pb, stride * line, SEEK_SET);
884  switch (s->compr) {
885  case TIFF_RAW:
886  if (ssrc + size - src < width)
887  return AVERROR_INVALIDDATA;
888 
889  if (!s->fill_order) {
890  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
891  dst, 1, src, 0, width, 0);
892  } else {
893  int i;
894  for (i = 0; i < width; i++)
895  dst[i] = ff_reverse[src[i]];
896  }
897 
898  /* Color processing for DNG images with uncompressed strips (non-tiled) */
899  if (is_dng) {
900  int is_u16, pixel_size_bytes, pixel_size_bits, elements;
901 
902  is_u16 = (s->bpp / s->bppcount > 8);
903  pixel_size_bits = (is_u16 ? 16 : 8);
904  pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
905 
906  elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount; // need to account for [1, 16] bpp
907  av_assert0 (elements * pixel_size_bytes <= FFABS(stride));
908  dng_blit(s,
909  dst,
910  0, // no stride, only 1 line
911  dst,
912  0, // no stride, only 1 line
913  elements,
914  1,
 915  0, // single-component variation is only present in JPEG-encoded DNGs
916  is_u16,
917  (line + strip_start)&1);
918  }
919 
920  src += width;
921  break;
922  case TIFF_PACKBITS:
923  for (pixels = 0; pixels < width;) {
924  if (ssrc + size - src < 2) {
925  av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
926  return AVERROR_INVALIDDATA;
927  }
928  code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
929  if (code >= 0) {
930  code++;
931  if (pixels + code > width ||
932  ssrc + size - src < code) {
933  av_log(s->avctx, AV_LOG_ERROR,
934  "Copy went out of bounds\n");
935  return AVERROR_INVALIDDATA;
936  }
937  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
938  dst, 1, src, 0, code, pixels);
939  src += code;
940  pixels += code;
941  } else if (code != -128) { // -127..-1
942  code = (-code) + 1;
943  if (pixels + code > width) {
944  av_log(s->avctx, AV_LOG_ERROR,
945  "Run went out of bounds\n");
946  return AVERROR_INVALIDDATA;
947  }
948  c = *src++;
949  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
950  dst, 0, NULL, c, code, pixels);
951  pixels += code;
952  }
953  }
954  if (s->fill_order) {
955  int i;
956  for (i = 0; i < width; i++)
957  dst[i] = ff_reverse[dst[i]];
958  }
959  break;
960  }
961  if (is_yuv) {
962  unpack_yuv(s, p, dst, strip_start + line);
963  line += s->subsampling[1] - 1;
964  } else if (p->format == AV_PIX_FMT_GRAY12) {
965  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
966  }
967  dst += stride;
968  }
969  return 0;
970 }
971 
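/* Walk the TileOffsets / TileByteCounts tables and decode every JPEG tile of a tiled DNG into the output frame */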
 972 static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame,
 973  const AVPacket *avpkt)
974 {
975  TiffContext *s = avctx->priv_data;
976  int tile_idx;
977  int tile_offset_offset, tile_offset;
978  int tile_byte_count_offset, tile_byte_count;
979  int tile_count_x, tile_count_y;
980  int tile_width, tile_length;
981  int has_width_leftover, has_height_leftover;
982  int tile_x = 0, tile_y = 0;
983  int pos_x = 0, pos_y = 0;
984  int ret;
985 
986  if (s->tile_width <= 0 || s->tile_length <= 0)
987  return AVERROR_INVALIDDATA;
988 
989  has_width_leftover = (s->width % s->tile_width != 0);
990  has_height_leftover = (s->height % s->tile_length != 0);
991 
992  /* Calculate tile counts (round up) */
993  tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
994  tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;
995 
996  /* Iterate over the number of tiles */
997  for (tile_idx = 0; tile_idx < tile_count_x * tile_count_y; tile_idx++) {
998  tile_x = tile_idx % tile_count_x;
999  tile_y = tile_idx / tile_count_x;
1000 
1001  if (has_width_leftover && tile_x == tile_count_x - 1) // If on the right-most tile
1002  tile_width = s->width % s->tile_width;
1003  else
1004  tile_width = s->tile_width;
1005 
1006  if (has_height_leftover && tile_y == tile_count_y - 1) // If on the bottom-most tile
1007  tile_length = s->height % s->tile_length;
1008  else
1009  tile_length = s->tile_length;
1010 
1011  /* Read tile offset */
1012  tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
1013  bytestream2_seek(&s->gb, tile_offset_offset, SEEK_SET);
1014  tile_offset = ff_tget_long(&s->gb, s->le);
1015 
1016  /* Read tile byte size */
1017  tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
1018  bytestream2_seek(&s->gb, tile_byte_count_offset, SEEK_SET);
1019  tile_byte_count = ff_tget_long(&s->gb, s->le);
1020 
1021  /* Seek to tile data */
1022  bytestream2_seek(&s->gb, tile_offset, SEEK_SET);
1023 
1024  /* Decode JPEG tile and copy it in the reference frame */
1025  ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
1026 
1027  if (ret < 0)
1028  return ret;
1029 
1030  /* Advance current positions */
1031  pos_x += tile_width;
1032  if (tile_x == tile_count_x - 1) { // If on the right edge
1033  pos_x = 0;
1034  pos_y += tile_length;
1035  }
1036  }
1037 
1038  /* Frame is ready to be output */
1039  frame->pict_type = AV_PICTURE_TYPE_I;
1040  frame->flags |= AV_FRAME_FLAG_KEY;
1041 
1042  return avpkt->size;
1043 }
1044 
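/* Pick the output pixel format from bpp/bppcount/planar/bayer/photometric, update the decoder dimensions
   and allocate the frame; returns 1 once a buffer has been obtained, 0 when the frame is skipped */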
 1045 static int init_image(TiffContext *s, AVFrame *frame)
 1046 {
1047  int ret;
1048  int create_gray_palette = 0;
1049 
1050  // make sure there is no aliasing in the following switch
1051  if (s->bpp > 128 || s->bppcount >= 10) {
1052  av_log(s->avctx, AV_LOG_ERROR,
1053  "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1054  s->bpp, s->bppcount);
1055  return AVERROR_INVALIDDATA;
1056  }
1057 
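 /* The switch key packs the sample layout into one integer: is_bayer * 100000 + planar * 10000 + bpp * 10 + bppcount,
    e.g. 483 = 48 bpp with 3 components, 10243 = planar 24 bpp with 3 components */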
1058  switch (s->planar * 10000 + s->bpp * 10 + s->bppcount + s->is_bayer * 100000) {
1059  case 11:
1060  if (!s->palette_is_set) {
1061  s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
1062  break;
1063  }
1064  case 21:
1065  case 41:
1066  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
1067  if (!s->palette_is_set) {
1068  create_gray_palette = 1;
1069  }
1070  break;
1071  case 81:
1072  s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
1073  break;
1074  case 121:
1075  s->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
1076  break;
1077  case 100081:
1078  switch (AV_RL32(s->pattern)) {
1079  case 0x02010100:
1080  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB8;
1081  break;
1082  case 0x00010102:
1083  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR8;
1084  break;
1085  case 0x01000201:
1086  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG8;
1087  break;
1088  case 0x01020001:
1089  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG8;
1090  break;
1091  default:
1092  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1093  AV_RL32(s->pattern));
1094  return AVERROR_PATCHWELCOME;
1095  }
1096  break;
1097  case 100101:
1098  case 100121:
1099  case 100141:
1100  case 100161:
1101  switch (AV_RL32(s->pattern)) {
1102  case 0x02010100:
1103  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB16;
1104  break;
1105  case 0x00010102:
1106  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR16;
1107  break;
1108  case 0x01000201:
1109  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG16;
1110  break;
1111  case 0x01020001:
1112  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG16;
1113  break;
1114  default:
1115  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1116  AV_RL32(s->pattern));
1117  return AVERROR_PATCHWELCOME;
1118  }
1119  break;
1120  case 243:
1121  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1122  if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1123  s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1124  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1125  s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1126  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1127  s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
1128  } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1129  s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
1130  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1131  s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1132  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1133  s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
1134  } else {
1135  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
1136  return AVERROR_PATCHWELCOME;
1137  }
1138  } else
1139  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
1140  break;
1141  case 161:
1142  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
1143  break;
1144  case 162:
1145  s->avctx->pix_fmt = AV_PIX_FMT_YA8;
1146  break;
1147  case 322:
1148  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
1149  break;
1150  case 324:
1151  s->avctx->pix_fmt = s->photometric == TIFF_PHOTOMETRIC_SEPARATED ? AV_PIX_FMT_RGB0 : AV_PIX_FMT_RGBA;
1152  break;
1153  case 405:
1154  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED)
1155  s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
1156  else {
1157  av_log(s->avctx, AV_LOG_ERROR,
1158  "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1159  return AVERROR_PATCHWELCOME;
1160  }
1161  break;
1162  case 483:
1163  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
1164  break;
1165  case 644:
1166  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
1167  break;
1168  case 10243:
1169  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
1170  break;
1171  case 10324:
1172  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
1173  break;
1174  case 10483:
1175  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
1176  break;
1177  case 10644:
1178  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
1179  break;
1180  case 963:
1181  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBF32LE : AV_PIX_FMT_RGBF32BE;
1182  break;
1183  case 1284:
1184  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBAF32LE : AV_PIX_FMT_RGBAF32BE;
1185  break;
1186  case 10963:
1187  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRPF32LE : AV_PIX_FMT_GBRPF32BE;
1188  break;
1189  case 11284:
1190  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAPF32LE : AV_PIX_FMT_GBRAPF32BE;
1191  break;
1192  default:
1193  av_log(s->avctx, AV_LOG_ERROR,
1194  "This format is not supported (bpp=%d, bppcount=%d)\n",
1195  s->bpp, s->bppcount);
1196  return AVERROR_INVALIDDATA;
1197  }
1198 
1199  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1200  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1201  if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
1202  !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
1203  desc->nb_components < 3) {
1204  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
1205  return AVERROR_INVALIDDATA;
1206  }
1207  }
1208 
1209  if (s->width != s->avctx->width || s->height != s->avctx->height) {
1210  ret = ff_set_dimensions(s->avctx, s->width, s->height);
1211  if (ret < 0)
1212  return ret;
1213  }
1214 
1215  if (s->avctx->skip_frame >= AVDISCARD_ALL)
1216  return 0;
1217 
1218  if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
1219  return ret;
1220  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1221  if (!create_gray_palette)
1222  memcpy(frame->data[1], s->palette, sizeof(s->palette));
1223  else {
1224  /* make default grayscale pal */
1225  int i;
1226  uint32_t *pal = (uint32_t *)frame->data[1];
1227  for (i = 0; i < 1<<s->bpp; i++)
1228  pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
1229  }
1230  }
1231  return 1;
1232 }
1233 
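/* Record an XResolution/YResolution rational in s->res[] and, once all four values are known,
   reduce them into the sample aspect ratio */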
1234 static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
1235 {
1236  int offset = tag == TIFF_YRES ? 2 : 0;
1237  s->res[offset++] = num;
1238  s->res[offset] = den;
1239  if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1240  uint64_t num = s->res[2] * (uint64_t)s->res[1];
1241  uint64_t den = s->res[0] * (uint64_t)s->res[3];
1242  if (num > INT64_MAX || den > INT64_MAX) {
1243  num = num >> 1;
1244  den = den >> 1;
1245  }
1246  av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1247  num, den, INT32_MAX);
1248  if (!s->avctx->sample_aspect_ratio.den)
1249  s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
1250  }
1251 }
1252 
 1253 static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
 1254 {
1255  AVFrameSideData *sd;
1256  GetByteContext gb_temp;
1257  unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
1258  int i, start;
1259  int pos;
1260  int ret;
1261  double *dp;
1262 
1263  ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
1264  if (ret < 0) {
1265  goto end;
1266  }
1267  if (tag <= s->last_tag)
1268  return AVERROR_INVALIDDATA;
1269 
 1270  // TIFF_STRIP_SIZE is exempt from the tag-ordering check, as some files place it out of order relative to TIFF_STRIP_OFFS
1271  if (tag != TIFF_STRIP_SIZE)
1272  s->last_tag = tag;
1273 
1274  off = bytestream2_tell(&s->gb);
1275  if (count == 1) {
1276  switch (type) {
1277  case AV_TIFF_BYTE:
1278  case AV_TIFF_SHORT:
1279  case AV_TIFF_LONG:
1280  value = ff_tget(&s->gb, type, s->le);
1281  break;
1282  case AV_TIFF_RATIONAL:
1283  value = ff_tget_long(&s->gb, s->le);
1284  value2 = ff_tget_long(&s->gb, s->le);
1285  if (!value2) {
1286  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator in rational\n");
1287  value2 = 1;
1288  }
1289 
1290  break;
1291  case AV_TIFF_STRING:
1292  if (count <= 4) {
1293  break;
1294  }
1295  default:
1296  value = UINT_MAX;
1297  }
1298  }
1299 
1300  switch (tag) {
1301  case TIFF_SUBFILE:
1302  s->is_thumbnail = (value != 0);
1303  break;
1304  case TIFF_WIDTH:
1305  if (value > INT_MAX)
1306  return AVERROR_INVALIDDATA;
1307  s->width = value;
1308  break;
1309  case TIFF_HEIGHT:
1310  if (value > INT_MAX)
1311  return AVERROR_INVALIDDATA;
1312  s->height = value;
1313  break;
1314  case TIFF_BPP:
1315  if (count > 5 || count <= 0) {
1316  av_log(s->avctx, AV_LOG_ERROR,
1317  "This format is not supported (bpp=%d, %d components)\n",
1318  value, count);
1319  return AVERROR_INVALIDDATA;
1320  }
1321  s->bppcount = count;
1322  if (count == 1)
1323  s->bpp = value;
1324  else {
1325  switch (type) {
1326  case AV_TIFF_BYTE:
1327  case AV_TIFF_SHORT:
1328  case AV_TIFF_LONG:
1329  s->bpp = 0;
1330  if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
1331  return AVERROR_INVALIDDATA;
1332  for (i = 0; i < count; i++)
1333  s->bpp += ff_tget(&s->gb, type, s->le);
1334  break;
1335  default:
1336  s->bpp = -1;
1337  }
1338  }
1339  break;
1341  if (count != 1) {
1342  av_log(s->avctx, AV_LOG_ERROR,
1343  "Samples per pixel requires a single value, many provided\n");
1344  return AVERROR_INVALIDDATA;
1345  }
1346  if (value > 5 || value <= 0) {
1347  av_log(s->avctx, AV_LOG_ERROR,
1348  "Invalid samples per pixel %d\n", value);
1349  return AVERROR_INVALIDDATA;
1350  }
1351  if (s->bppcount == 1)
1352  s->bpp *= value;
1353  s->bppcount = value;
1354  break;
1355  case TIFF_COMPR:
1356  s->compr = value;
1357  av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
1358  s->predictor = 0;
1359  switch (s->compr) {
1360  case TIFF_RAW:
1361  case TIFF_PACKBITS:
1362  case TIFF_LZW:
1363  case TIFF_CCITT_RLE:
1364  break;
1365  case TIFF_G3:
1366  case TIFF_G4:
1367  s->fax_opts = 0;
1368  break;
1369  case TIFF_DEFLATE:
1370  case TIFF_ADOBE_DEFLATE:
1371 #if CONFIG_ZLIB
1372  break;
1373 #else
1374  av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
1375  return AVERROR(ENOSYS);
1376 #endif
1377  case TIFF_JPEG:
1378  case TIFF_NEWJPEG:
1379  s->is_jpeg = 1;
1380  break;
1381  case TIFF_LZMA:
1382 #if CONFIG_LZMA
1383  break;
1384 #else
1385  av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
1386  return AVERROR(ENOSYS);
1387 #endif
1388  default:
1389  av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
1390  s->compr);
1391  return AVERROR_INVALIDDATA;
1392  }
1393  break;
1394  case TIFF_ROWSPERSTRIP:
1395  if (!value || (type == AV_TIFF_LONG && value == UINT_MAX))
1396  value = s->height;
1397  s->rps = FFMIN(value, s->height);
1398  break;
1399  case TIFF_STRIP_OFFS:
1400  if (count == 1) {
1401  if (value > INT_MAX) {
1402  av_log(s->avctx, AV_LOG_ERROR,
1403  "strippos %u too large\n", value);
1404  return AVERROR_INVALIDDATA;
1405  }
1406  s->strippos = 0;
1407  s->stripoff = value;
1408  } else
1409  s->strippos = off;
1410  s->strips = count;
1411  if (s->strips == s->bppcount)
1412  s->rps = s->height;
1413  s->sot = type;
1414  break;
1415  case TIFF_STRIP_SIZE:
1416  if (count == 1) {
1417  if (value > INT_MAX) {
1418  av_log(s->avctx, AV_LOG_ERROR,
1419  "stripsize %u too large\n", value);
1420  return AVERROR_INVALIDDATA;
1421  }
1422  s->stripsizesoff = 0;
1423  s->stripsize = value;
1424  s->strips = 1;
1425  } else {
1426  s->stripsizesoff = off;
1427  }
1428  s->strips = count;
1429  s->sstype = type;
1430  break;
1431  case TIFF_XRES:
1432  case TIFF_YRES:
1433  set_sar(s, tag, value, value2);
1434  break;
1435  case TIFF_TILE_OFFSETS:
1436  s->tile_offsets_offset = off;
1437  s->is_tiled = 1;
1438  break;
1439  case TIFF_TILE_BYTE_COUNTS:
1440  s->tile_byte_counts_offset = off;
1441  break;
1442  case TIFF_TILE_LENGTH:
1443  if (value > INT_MAX)
1444  return AVERROR_INVALIDDATA;
1445  s->tile_length = value;
1446  break;
1447  case TIFF_TILE_WIDTH:
1448  if (value > INT_MAX)
1449  return AVERROR_INVALIDDATA;
1450  s->tile_width = value;
1451  break;
1452  case TIFF_PREDICTOR:
1453  if (value > INT_MAX)
1454  return AVERROR_INVALIDDATA;
1455  s->predictor = value;
1456  break;
1457  case TIFF_SUB_IFDS:
1458  if (count == 1)
1459  s->sub_ifd = value;
1460  else if (count > 1)
1461  s->sub_ifd = ff_tget_long(&s->gb, s->le); /** Only get the first SubIFD */
1462  break;
1465  if (count < 1 || count > FF_ARRAY_ELEMS(s->dng_lut))
1466  return AVERROR_INVALIDDATA;
1467  for (int i = 0; i < count; i++)
1468  s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
1469  s->white_level = s->dng_lut[count-1];
1470  break;
1471  case DNG_BLACK_LEVEL:
1472  if (count > FF_ARRAY_ELEMS(s->black_level))
1473  return AVERROR_INVALIDDATA;
1474  s->black_level[0] = value / (float)value2;
1475  for (int i = 0; i < count && count > 1; i++) {
1476  if (type == AV_TIFF_RATIONAL) {
1477  value = ff_tget_long(&s->gb, s->le);
1478  value2 = ff_tget_long(&s->gb, s->le);
1479  if (!value2) {
1480  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1481  value2 = 1;
1482  }
1483 
1484  s->black_level[i] = value / (float)value2;
1485  } else if (type == AV_TIFF_SRATIONAL) {
1486  int value = ff_tget_long(&s->gb, s->le);
1487  int value2 = ff_tget_long(&s->gb, s->le);
1488  if (!value2) {
1489  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1490  value2 = 1;
1491  }
1492 
1493  s->black_level[i] = value / (float)value2;
1494  } else {
1495  s->black_level[i] = ff_tget(&s->gb, type, s->le);
1496  }
1497  }
1498  for (int i = count; i < 4 && count > 0; i++)
1499  s->black_level[i] = s->black_level[count - 1];
1500  break;
1501  case DNG_WHITE_LEVEL:
1502  s->white_level = value;
1503  break;
1504  case TIFF_CFA_PATTERN_DIM:
1505  if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1506  ff_tget(&s->gb, type, s->le) != 2)) {
1507  av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
1508  return AVERROR_INVALIDDATA;
1509  }
1510  break;
1511  case TIFF_CFA_PATTERN:
1512  s->is_bayer = 1;
1513  s->pattern[0] = ff_tget(&s->gb, type, s->le);
1514  s->pattern[1] = ff_tget(&s->gb, type, s->le);
1515  s->pattern[2] = ff_tget(&s->gb, type, s->le);
1516  s->pattern[3] = ff_tget(&s->gb, type, s->le);
1517  break;
1518  case TIFF_PHOTOMETRIC:
1519  switch (value) {
1522  case TIFF_PHOTOMETRIC_RGB:
1526  case TIFF_PHOTOMETRIC_CFA:
1527  case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
1528  s->photometric = value;
1529  break;
1537  "PhotometricInterpretation 0x%04X",
1538  value);
1539  return AVERROR_PATCHWELCOME;
1540  default:
1541  av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
1542  "unknown\n", value);
1543  return AVERROR_INVALIDDATA;
1544  }
1545  break;
1546  case TIFF_FILL_ORDER:
1547  if (value < 1 || value > 2) {
1548  av_log(s->avctx, AV_LOG_ERROR,
1549  "Unknown FillOrder value %d, trying default one\n", value);
1550  value = 1;
1551  }
1552  s->fill_order = value - 1;
1553  break;
1554  case TIFF_PAL: {
1555  GetByteContext pal_gb[3];
1556  off = type_sizes[type];
1557  if (count / 3 > 256 ||
1558  bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
1559  return AVERROR_INVALIDDATA;
1560 
1561  pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1562  bytestream2_skip(&pal_gb[1], count / 3 * off);
1563  bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
1564 
1565  off = (type_sizes[type] - 1) << 3;
1566  if (off > 31U) {
1567  av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
1568  return AVERROR_INVALIDDATA;
1569  }
1570 
1571  for (i = 0; i < count / 3; i++) {
1572  uint32_t p = 0xFF000000;
1573  p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1574  p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1575  p |= ff_tget(&pal_gb[2], type, s->le) >> off;
1576  s->palette[i] = p;
1577  }
1578  s->palette_is_set = 1;
1579  break;
1580  }
1581  case TIFF_PLANAR:
1582  s->planar = value == 2;
1583  break;
1585  if (count != 2) {
1586  av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
1587  return AVERROR_INVALIDDATA;
1588  }
1589  for (i = 0; i < count; i++) {
1590  s->subsampling[i] = ff_tget(&s->gb, type, s->le);
1591  if (s->subsampling[i] <= 0) {
1592  av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
1593  s->subsampling[i] = 1;
1594  return AVERROR_INVALIDDATA;
1595  }
1596  }
1597  break;
1598  case TIFF_T4OPTIONS:
1599  if (s->compr == TIFF_G3) {
1600  if (value > INT_MAX)
1601  return AVERROR_INVALIDDATA;
1602  s->fax_opts = value;
1603  }
1604  break;
1605  case TIFF_T6OPTIONS:
1606  if (s->compr == TIFF_G4) {
1607  if (value > INT_MAX)
1608  return AVERROR_INVALIDDATA;
1609  s->fax_opts = value;
1610  }
1611  break;
1612 #define ADD_METADATA(count, name, sep)\
1613  if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1614  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1615  goto end;\
1616  }
1618  ADD_METADATA(count, "ModelPixelScaleTag", NULL);
1619  break;
1621  ADD_METADATA(count, "ModelTransformationTag", NULL);
1622  break;
1623  case TIFF_MODEL_TIEPOINT:
1624  ADD_METADATA(count, "ModelTiepointTag", NULL);
1625  break;
1627  if (s->geotag_count) {
1628  avpriv_request_sample(s->avctx, "Multiple geo key directories");
1629  return AVERROR_INVALIDDATA;
1630  }
1631  ADD_METADATA(1, "GeoTIFF_Version", NULL);
1632  ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
1633  s->geotag_count = ff_tget_short(&s->gb, s->le);
1634  if (s->geotag_count > count / 4 - 1) {
1635  s->geotag_count = count / 4 - 1;
1636  av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
1637  }
1638  if ( bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4
1639  || s->geotag_count == 0) {
1640  s->geotag_count = 0;
1641  return -1;
1642  }
1643  s->geotags = av_calloc(s->geotag_count, sizeof(*s->geotags));
1644  if (!s->geotags) {
1645  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1646  s->geotag_count = 0;
1647  goto end;
1648  }
1649  for (i = 0; i < s->geotag_count; i++) {
1650  unsigned val;
1651  s->geotags[i].key = ff_tget_short(&s->gb, s->le);
1652  s->geotags[i].type = ff_tget_short(&s->gb, s->le);
1653  s->geotags[i].count = ff_tget_short(&s->gb, s->le);
1654  val = ff_tget_short(&s->gb, s->le);
1655 
1656  if (!s->geotags[i].type) {
1657  const char *str = get_geokey_val(s->geotags[i].key, val);
1658 
1659  s->geotags[i].val = str ? av_strdup(str) : av_asprintf("Unknown-%u", val);
1660  if (!s->geotags[i].val)
1661  return AVERROR(ENOMEM);
1662  } else
1663  s->geotags[i].offset = val;
1664  }
1665  break;
1667  if (count >= INT_MAX / sizeof(int64_t))
1668  return AVERROR_INVALIDDATA;
1669  if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
1670  return AVERROR_INVALIDDATA;
1671  dp = av_malloc_array(count, sizeof(double));
1672  if (!dp) {
1673  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1674  goto end;
1675  }
1676  for (i = 0; i < count; i++)
1677  dp[i] = ff_tget_double(&s->gb, s->le);
1678  for (i = 0; i < s->geotag_count; i++) {
1679  if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
1680  if (s->geotags[i].count == 0
1681  || s->geotags[i].offset + s->geotags[i].count > count) {
1682  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1683  } else if (s->geotags[i].val) {
1684  av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
1685  } else {
1686  char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1687  if (!ap) {
1688  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1689  av_freep(&dp);
1690  return AVERROR(ENOMEM);
1691  }
1692  s->geotags[i].val = ap;
1693  }
1694  }
1695  }
1696  av_freep(&dp);
1697  break;
1698  case TIFF_GEO_ASCII_PARAMS:
1699  pos = bytestream2_tell(&s->gb);
1700  for (i = 0; i < s->geotag_count; i++) {
1701  if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
1702  if (s->geotags[i].count == 0
1703  || s->geotags[i].offset + s->geotags[i].count > count) {
1704  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1705  } else {
1706  char *ap;
1707 
1708  bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
1709  if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
1710  return AVERROR_INVALIDDATA;
1711  if (s->geotags[i].val)
1712  return AVERROR_INVALIDDATA;
1713  ap = av_malloc(s->geotags[i].count);
1714  if (!ap) {
1715  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1716  return AVERROR(ENOMEM);
1717  }
1718  bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
1719  ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
1720  s->geotags[i].val = ap;
1721  }
1722  }
1723  }
1724  break;
1725  case TIFF_ICC_PROFILE:
1726  gb_temp = s->gb;
1727  bytestream2_seek(&gb_temp, off, SEEK_SET);
1728 
1729  if (bytestream2_get_bytes_left(&gb_temp) < count)
1730  return AVERROR_INVALIDDATA;
1731 
1733  if (ret < 0)
1734  return ret;
1735  if (sd)
1736  bytestream2_get_bufferu(&gb_temp, sd->data, count);
1737  break;
1738  case TIFF_ARTIST:
1739  ADD_METADATA(count, "artist", NULL);
1740  break;
1741  case TIFF_COPYRIGHT:
1742  ADD_METADATA(count, "copyright", NULL);
1743  break;
1744  case TIFF_DATE:
1745  ADD_METADATA(count, "date", NULL);
1746  break;
1747  case TIFF_DOCUMENT_NAME:
1748  ADD_METADATA(count, "document_name", NULL);
1749  break;
1750  case TIFF_HOST_COMPUTER:
1751  ADD_METADATA(count, "computer", NULL);
1752  break;
1754  ADD_METADATA(count, "description", NULL);
1755  break;
1756  case TIFF_MAKE:
1757  ADD_METADATA(count, "make", NULL);
1758  break;
1759  case TIFF_MODEL:
1760  ADD_METADATA(count, "model", NULL);
1761  break;
1762  case TIFF_PAGE_NAME:
1763  ADD_METADATA(count, "page_name", NULL);
1764  break;
1765  case TIFF_PAGE_NUMBER:
1766  ADD_METADATA(count, "page_number", " / ");
1767  // need to seek back to re-read the page number
1768  bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
1769  // read the page number
1770  s->cur_page = ff_tget_short(&s->gb, s->le);
1771  // get back to where we were before the previous seek
1772  bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1773  break;
1774  case TIFF_SOFTWARE_NAME:
1775  ADD_METADATA(count, "software", NULL);
1776  break;
1777  case DNG_VERSION:
1778  if (count == 4) {
1779  unsigned int ver[4];
1780  ver[0] = ff_tget(&s->gb, type, s->le);
1781  ver[1] = ff_tget(&s->gb, type, s->le);
1782  ver[2] = ff_tget(&s->gb, type, s->le);
1783  ver[3] = ff_tget(&s->gb, type, s->le);
1784 
1785  av_log(s->avctx, AV_LOG_DEBUG, "DNG file, version %u.%u.%u.%u\n",
1786  ver[0], ver[1], ver[2], ver[3]);
1787 
 1788  tiff_set_type(s, TIFF_TYPE_DNG);
 1789  }
1790  break;
1791  case DNG_ANALOG_BALANCE:
1792  if (type != AV_TIFF_RATIONAL)
1793  break;
1794 
1795  for (int i = 0; i < 3; i++) {
1796  value = ff_tget_long(&s->gb, s->le);
1797  value2 = ff_tget_long(&s->gb, s->le);
1798  if (!value2) {
1799  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1800  value2 = 1;
1801  }
1802 
1803  s->analog_balance[i] = value / (float)value2;
1804  }
1805  break;
1806  case DNG_AS_SHOT_NEUTRAL:
1807  if (type != AV_TIFF_RATIONAL)
1808  break;
1809 
1810  for (int i = 0; i < 3; i++) {
1811  value = ff_tget_long(&s->gb, s->le);
1812  value2 = ff_tget_long(&s->gb, s->le);
1813  if (!value2) {
1814  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1815  value2 = 1;
1816  }
1817 
1818  s->as_shot_neutral[i] = value / (float)value2;
1819  }
1820  break;
1821  case DNG_AS_SHOT_WHITE_XY:
1822  if (type != AV_TIFF_RATIONAL)
1823  break;
1824 
1825  for (int i = 0; i < 2; i++) {
1826  value = ff_tget_long(&s->gb, s->le);
1827  value2 = ff_tget_long(&s->gb, s->le);
1828  if (!value2) {
1829  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1830  value2 = 1;
1831  }
1832 
1833  s->as_shot_white[i] = value / (float)value2;
1834  }
1835  s->as_shot_white[2] = 1.f - s->as_shot_white[0] - s->as_shot_white[1];
1836  for (int i = 0; i < 3; i++) {
1837  s->as_shot_white[i] /= d65_white[i];
1838  }
1839  break;
1840  case DNG_COLOR_MATRIX1:
1841  case DNG_COLOR_MATRIX2:
1842  for (int i = 0; i < 3; i++) {
1843  for (int j = 0; j < 3; j++) {
1844  int value = ff_tget_long(&s->gb, s->le);
1845  int value2 = ff_tget_long(&s->gb, s->le);
1846  if (!value2) {
1847  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1848  value2 = 1;
1849  }
1850  s->color_matrix[i][j] = value / (float)value2;
1851  }
1852  s->use_color_matrix = 1;
1853  }
1854  break;
1857  for (int i = 0; i < 3; i++) {
1858  for (int j = 0; j < 3; j++) {
1859  int value = ff_tget_long(&s->gb, s->le);
1860  int value2 = ff_tget_long(&s->gb, s->le);
1861  if (!value2) {
1862  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1863  value2 = 1;
1864  }
1865  s->camera_calibration[i][j] = value / (float)value2;
1866  }
1867  }
1868  break;
1869  case CINEMADNG_TIME_CODES:
1870  case CINEMADNG_FRAME_RATE:
1871  case CINEMADNG_T_STOP:
1872  case CINEMADNG_REEL_NAME:
1875  break;
1876  default:
1877  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1878  av_log(s->avctx, AV_LOG_ERROR,
1879  "Unknown or unsupported tag %d/0x%0X\n",
1880  tag, tag);
1881  return AVERROR_INVALIDDATA;
1882  }
1883  }
1884 end:
1885  if (s->bpp > 128U) {
1886  av_log(s->avctx, AV_LOG_ERROR,
1887  "This format is not supported (bpp=%d, %d components)\n",
1888  s->bpp, count);
1889  s->bpp = 0;
1890  return AVERROR_INVALIDDATA;
1891  }
1892  bytestream2_seek(&s->gb, start, SEEK_SET);
1893  return 0;
1894 }
1895 
1896 static const float xyz2rgb[3][3] = {
1897  { 0.412453f, 0.357580f, 0.180423f },
1898  { 0.212671f, 0.715160f, 0.072169f },
1899  { 0.019334f, 0.119193f, 0.950227f },
1900 };
1901 
 1902 static void camera_xyz_coeff(TiffContext *s,
 1903  float rgb2cam[3][4],
1904  double cam2xyz[4][3])
1905 {
1906  double cam2rgb[4][3], num;
1907  int i, j, k;
1908 
1909  for (i = 0; i < 3; i++) {
1910  for (j = 0; j < 3; j++) {
1911  cam2rgb[i][j] = 0.;
1912  for (k = 0; k < 3; k++)
1913  cam2rgb[i][j] += cam2xyz[i][k] * xyz2rgb[k][j];
1914  }
1915  }
1916 
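  /* Normalize each camera-to-RGB row so it sums to 1; the reciprocal of the
     original row sum becomes the per-channel white-balance premultiplier. */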
1917  for (i = 0; i < 3; i++) {
1918  for (num = j = 0; j < 3; j++)
1919  num += cam2rgb[i][j];
1920  if (!num)
1921  num = 1;
1922  for (j = 0; j < 3; j++)
1923  cam2rgb[i][j] /= num;
1924  s->premultiply[i] = 1.f / num;
1925  }
1926 }
1927 
1928 static int decode_frame(AVCodecContext *avctx, AVFrame *p,
1929  int *got_frame, AVPacket *avpkt)
1930 {
1931  TiffContext *const s = avctx->priv_data;
1932  unsigned off, last_off = 0;
1933  int le, ret, plane, planes;
1934  int i, j, entries, stride;
1935  unsigned soff, ssize;
1936  uint8_t *dst;
1937  GetByteContext stripsizes;
1938  GetByteContext stripdata;
1939  int retry_for_subifd, retry_for_page;
1940  int is_dng;
1941  int has_tile_bits, has_strip_bits;
1942 
1943  av_exif_free(&s->exif_meta);
1944  /* this will not parse the image data */
1945  ret = av_exif_parse_buffer(avctx, avpkt->data, avpkt->size, &s->exif_meta, AV_EXIF_TIFF_HEADER);
1946  if (ret < 0)
1947  av_log(avctx, AV_LOG_ERROR, "could not parse EXIF data: %s\n", av_err2str(ret));
1948 
1949  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1950 
1951  // parse image header
1952  if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
1953  av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
1954  return ret;
1955  } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1956  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1957  return AVERROR_INVALIDDATA;
1958  }
1959  s->le = le;
1960  // TIFF_BPP is not a required tag and defaults to 1
1961 
1962  s->tiff_type = TIFF_TYPE_TIFF;
1963  s->use_color_matrix = 0;
1964 again:
1965  s->is_thumbnail = 0;
1966  s->bppcount = s->bpp = 1;
1967  s->photometric = TIFF_PHOTOMETRIC_NONE;
1968  s->compr = TIFF_RAW;
1969  s->fill_order = 0;
1970  s->white_level = 0;
1971  s->is_bayer = 0;
1972  s->is_tiled = 0;
1973  s->is_jpeg = 0;
1974  s->cur_page = 0;
1975  s->last_tag = 0;
1976 
1977  for (i = 0; i < 65536; i++)
1978  s->dng_lut[i] = i;
1979 
1980  for (i = 0; i < FF_ARRAY_ELEMS(s->black_level); i++)
1981  s->black_level[i] = 0.f;
1982 
1983  for (i = 0; i < FF_ARRAY_ELEMS(s->as_shot_neutral); i++)
1984  s->as_shot_neutral[i] = 0.f;
1985 
1986  for (i = 0; i < FF_ARRAY_ELEMS(s->as_shot_white); i++)
1987  s->as_shot_white[i] = 1.f;
1988 
1989  for (i = 0; i < FF_ARRAY_ELEMS(s->analog_balance); i++)
1990  s->analog_balance[i] = 1.f;
1991 
1992  for (i = 0; i < FF_ARRAY_ELEMS(s->premultiply); i++)
1993  s->premultiply[i] = 1.f;
1994 
1995  for (i = 0; i < 4; i++)
1996  for (j = 0; j < 4; j++)
1997  s->camera_calibration[i][j] = i == j;
1998 
1999  free_geotags(s);
2000 
2001  // Reset these offsets so we can tell if they were set this frame
2002  s->stripsizesoff = s->strippos = 0;
2003  /* parse image file directory */
2004  bytestream2_seek(&s->gb, off, SEEK_SET);
2005  entries = ff_tget_short(&s->gb, le);
2006  if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
2007  return AVERROR_INVALIDDATA;
2008  for (i = 0; i < entries; i++) {
2009  if ((ret = tiff_decode_tag(s, p)) < 0)
2010  return ret;
2011  }
2012 
2013  if (s->get_thumbnail && !s->is_thumbnail) {
2014  av_log(avctx, AV_LOG_INFO, "No embedded thumbnail present\n");
2015  return AVERROR_EOF;
2016  }
2017 
2018  /** whether we should process this IFD's SubIFD */
2019  retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
2020  /** whether we should process this multi-page IFD's next page */
2021  retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
2022 
2023  if (retry_for_page) {
2024  // set offset to the next IFD
2025  off = ff_tget_long(&s->gb, le);
2026  } else if (retry_for_subifd) {
2027  // set offset to the SubIFD
2028  off = s->sub_ifd;
2029  }
2030 
2031  if (retry_for_subifd || retry_for_page) {
2032  if (!off) {
2033  av_log(avctx, AV_LOG_ERROR, "Requested entry not found\n");
2034  return AVERROR_INVALIDDATA;
2035  }
2036  if (off <= last_off) {
2037  avpriv_request_sample(s->avctx, "non increasing IFD offset");
2038  return AVERROR_INVALIDDATA;
2039  }
2040  last_off = off;
2041  if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
2042  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
2043  return AVERROR_INVALIDDATA;
2044  }
2045  s->sub_ifd = 0;
2046  goto again;
2047  }
2048 
2049  /* At this point we've decided on which (Sub)IFD to process */
2050 
2051  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
2052 
2053  for (i = 0; i<s->geotag_count; i++) {
2054  const char *keyname = get_geokey_name(s->geotags[i].key);
2055  if (!keyname) {
2056  av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
2057  continue;
2058  }
2059  if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
2060  av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
2061  continue;
2062  }
2063  ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, AV_DICT_DONT_STRDUP_VAL);
2064  s->geotags[i].val = NULL;
2065  if (ret<0) {
2066  av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
2067  return ret;
2068  }
2069  }
2070 
2071  if (is_dng) {
2072  double cam2xyz[4][3];
2073  float cmatrix[3][4];
2074  float pmin = FLT_MAX;
2075  int bps;
2076 
2077  for (i = 0; i < 3; i++) {
2078  for (j = 0; j < 3; j++)
2079  s->camera_calibration[i][j] *= s->analog_balance[i];
2080  }
2081 
2082  if (!s->use_color_matrix) {
2083  for (i = 0; i < 3; i++) {
2084  if (s->camera_calibration[i][i])
2085  s->premultiply[i] /= s->camera_calibration[i][i];
2086  }
2087  } else {
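  /* Build the camera-to-XYZ matrix as CameraCalibration * ColorMatrix, with
     each XYZ column scaled by the (D65-relative) as-shot white point. */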
2088  for (int c = 0; c < 3; c++) {
2089  for (i = 0; i < 3; i++) {
2090  cam2xyz[c][i] = 0.;
2091  for (j = 0; j < 3; j++)
2092  cam2xyz[c][i] += s->camera_calibration[c][j] * s->color_matrix[j][i] * s->as_shot_white[i];
2093  }
2094  }
2095 
2096  camera_xyz_coeff(s, cmatrix, cam2xyz);
2097  }
2098 
2099  for (int c = 0; c < 3; c++)
2100  pmin = fminf(pmin, s->premultiply[c]);
2101 
2102  for (int c = 0; c < 3; c++)
2103  s->premultiply[c] /= pmin;
2104 
2105  if (s->bpp % s->bppcount)
2106  return AVERROR_INVALIDDATA;
2107  bps = s->bpp / s->bppcount;
2108  if (bps < 8 || bps > 32)
2109  return AVERROR_INVALIDDATA;
2110 
2111  if (s->white_level == 0)
2112  s->white_level = (1LL << bps) - 1; /* Default value as per the spec */
2113 
2114  if (s->white_level <= s->black_level[0]) {
2115  av_log(avctx, AV_LOG_ERROR, "BlackLevel (%g) must be less than WhiteLevel (%"PRId32")\n",
2116  s->black_level[0], s->white_level);
2117  return AVERROR_INVALIDDATA;
2118  }
2119 
2120  if (s->planar)
2121  return AVERROR_PATCHWELCOME;
2122  }
2123 
2124  if (!s->is_tiled && !s->strippos && !s->stripoff) {
2125  av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
2126  return AVERROR_INVALIDDATA;
2127  }
2128 
2129  has_tile_bits = s->is_tiled || s->tile_byte_counts_offset || s->tile_offsets_offset || s->tile_width || s->tile_length;
2130  has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff;
2131 
2132  if (has_tile_bits && has_strip_bits) {
2133  int tiled_dng = s->is_tiled && is_dng;
2134  av_log(avctx, tiled_dng ? AV_LOG_WARNING : AV_LOG_ERROR, "Tiled TIFF is not allowed to strip\n");
2135  if (!tiled_dng)
2136  return AVERROR_INVALIDDATA;
2137  }
2138 
2139  /* now we have the data and may start decoding */
2140  if ((ret = init_image(s, p)) <= 0)
2141  return ret;
2142 
2143  if (!s->is_tiled || has_strip_bits) {
2144  if (s->strips == 1 && !s->stripsize) {
2145  av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
2146  s->stripsize = avpkt->size - s->stripoff;
2147  }
2148 
2149  if (s->stripsizesoff) {
2150  if (s->stripsizesoff >= (unsigned)avpkt->size)
2151  return AVERROR_INVALIDDATA;
2152  bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
2153  avpkt->size - s->stripsizesoff);
2154  }
2155  if (s->strippos) {
2156  if (s->strippos >= (unsigned)avpkt->size)
2157  return AVERROR_INVALIDDATA;
2158  bytestream2_init(&stripdata, avpkt->data + s->strippos,
2159  avpkt->size - s->strippos);
2160  }
2161 
2162  if (s->rps <= 0 || s->rps % s->subsampling[1]) {
2163  av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
2164  return AVERROR_INVALIDDATA;
2165  }
2166  }
2167 
2168  if (s->photometric == TIFF_PHOTOMETRIC_LINEAR_RAW ||
2169  s->photometric == TIFF_PHOTOMETRIC_CFA) {
 2170  p->color_trc = AVCOL_TRC_LINEAR;
 2171  } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
 2172  p->color_trc = AVCOL_TRC_GAMMA22;
 2173  }
2174 
2175  /* Handle DNG images with JPEG-compressed tiles */
2176 
2177  if (is_dng && s->is_tiled) {
2178  if (!s->is_jpeg) {
2179  avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
2180  return AVERROR_PATCHWELCOME;
2181  } else if (!s->is_bayer) {
2182  avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
2183  return AVERROR_PATCHWELCOME;
2184  } else {
2185  if ((ret = dng_decode_tiles(avctx, p, avpkt)) > 0)
2186  *got_frame = 1;
2187  return ret;
2188  }
2189  }
2190 
2191  /* Handle TIFF images and DNG images with uncompressed strips (non-tiled) */
2192 
2193  planes = s->planar ? s->bppcount : 1;
2194  for (plane = 0; plane < planes; plane++) {
2195  uint8_t *five_planes = NULL;
2196  int remaining = avpkt->size;
2197  int decoded_height;
2198  stride = p->linesize[plane];
2199  dst = p->data[plane];
2200  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2201  s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
2202  stride = stride * 5 / 4;
2203  five_planes =
2204  dst = av_malloc(stride * s->height);
2205  if (!dst)
2206  return AVERROR(ENOMEM);
2207  }
2208  for (i = 0; i < s->height; i += s->rps) {
2209  if (i)
2210  dst += s->rps * stride;
2211  if (s->stripsizesoff)
2212  ssize = ff_tget(&stripsizes, s->sstype, le);
2213  else
2214  ssize = s->stripsize;
2215 
2216  if (s->strippos)
2217  soff = ff_tget(&stripdata, s->sot, le);
2218  else
2219  soff = s->stripoff;
2220 
2221  if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
2222  av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
2223  av_freep(&five_planes);
2224  return AVERROR_INVALIDDATA;
2225  }
2226  remaining -= ssize;
2227  if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
2228  FFMIN(s->rps, s->height - i))) < 0) {
2229  if (avctx->err_recognition & AV_EF_EXPLODE) {
2230  av_freep(&five_planes);
2231  return ret;
2232  }
2233  break;
2234  }
2235  }
2236  decoded_height = FFMIN(i, s->height);
2237 
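  /* Predictor 2 (horizontal differencing): each stored sample is the
     difference from the sample to its left, so integrate along each row. */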
2238  if (s->predictor == 2) {
2239  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
2240  av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
2241  return AVERROR_PATCHWELCOME;
2242  }
2243  dst = five_planes ? five_planes : p->data[plane];
2244  soff = s->bpp >> 3;
2245  if (s->planar)
2246  soff = FFMAX(soff / s->bppcount, 1);
2247  ssize = s->width * soff;
2248  if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
2249  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
2250  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE ||
2251  s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
2252  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
2253  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
2254  for (i = 0; i < decoded_height; i++) {
2255  for (j = soff; j < ssize; j += 2)
2256  AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
2257  dst += stride;
2258  }
2259  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
2260  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
2261  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
2262  s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
2263  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
2264  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
2265  for (i = 0; i < decoded_height; i++) {
2266  for (j = soff; j < ssize; j += 2)
2267  AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
2268  dst += stride;
2269  }
2270  } else {
2271  for (i = 0; i < decoded_height; i++) {
2272  for (j = soff; j < ssize; j++)
2273  dst[j] += dst[j - soff];
2274  dst += stride;
2275  }
2276  }
2277  }
2278 
2279  /* Floating point predictor
2280  TIFF Technical Note 3 http://chriscox.org/TIFFTN3d1.pdf */
2281  if (s->predictor == 3) {
2282  int channels = s->bppcount;
2283  int group_size;
2284  uint8_t *tmpbuf;
2285  int bpc;
2286 
2287  dst = five_planes ? five_planes : p->data[plane];
2288  soff = s->bpp >> 3;
2289  if (s->planar) {
2290  soff = FFMAX(soff / s->bppcount, 1);
2291  channels = 1;
2292  }
2293  ssize = s->width * soff;
2294  bpc = FFMAX(soff / s->bppcount, 1); /* Bytes per component */
2295  group_size = s->width * channels;
2296 
2297  tmpbuf = av_malloc(ssize);
2298  if (!tmpbuf) {
2299  av_free(five_planes);
2300  return AVERROR(ENOMEM);
2301  }
2302 
2303  if (s->avctx->pix_fmt == AV_PIX_FMT_RGBF32LE ||
2304  s->avctx->pix_fmt == AV_PIX_FMT_RGBAF32LE) {
2305  for (i = 0; i < decoded_height; i++) {
2306  /* Copy first sample byte for each channel */
2307  for (j = 0; j < channels; j++)
2308  tmpbuf[j] = dst[j];
2309 
2310  /* Decode horizontal differences */
2311  for (j = channels; j < ssize; j++)
2312  tmpbuf[j] = dst[j] + tmpbuf[j-channels];
2313 
2314  /* Combine shuffled bytes from their separate groups. Each
2315  byte of every floating point value in a row of pixels is
2316  split and combined into separate groups. A group of all
2317  the sign/exponents bytes in the row and groups for each
2318  of the upper, mid, and lower mantissa bytes in the row. */
2319  for (j = 0; j < group_size; j++) {
2320  for (int k = 0; k < bpc; k++) {
2321  dst[bpc * j + k] = tmpbuf[(bpc - k - 1) * group_size + j];
2322  }
2323  }
2324  dst += stride;
2325  }
2326  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGBF32BE ||
2327  s->avctx->pix_fmt == AV_PIX_FMT_RGBAF32BE) {
2328  /* Same as LE only the shuffle at the end is reversed */
2329  for (i = 0; i < decoded_height; i++) {
2330  for (j = 0; j < channels; j++)
2331  tmpbuf[j] = dst[j];
2332 
2333  for (j = channels; j < ssize; j++)
2334  tmpbuf[j] = dst[j] + tmpbuf[j-channels];
2335 
2336  for (j = 0; j < group_size; j++) {
2337  for (int k = 0; k < bpc; k++) {
2338  dst[bpc * j + k] = tmpbuf[k * group_size + j];
2339  }
2340  }
2341  dst += stride;
2342  }
2343  } else {
2344  av_log(s->avctx, AV_LOG_ERROR, "unsupported floating point pixel format\n");
2345  }
2346  av_free(tmpbuf);
2347  }
2348 
2349  if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
2350  int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
2351  dst = p->data[plane];
2352  for (i = 0; i < s->height; i++) {
2353  for (j = 0; j < stride; j++)
2354  dst[j] = c - dst[j];
2355  dst += stride;
2356  }
2357  }
2358 
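  /* Convert CMYK (PhotometricInterpretation = Separated) to RGB:
     R = (255 - C) * (255 - K) / 255, using "* 257 >> 16" as a cheap /255. */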
2359  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2360  (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
2361  int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
2362  uint8_t *src = five_planes ? five_planes : p->data[plane];
2363  dst = p->data[plane];
2364  for (i = 0; i < s->height; i++) {
2365  for (j = 0; j < s->width; j++) {
2366  int k = 255 - src[x * j + 3];
2367  int r = (255 - src[x * j ]) * k;
2368  int g = (255 - src[x * j + 1]) * k;
2369  int b = (255 - src[x * j + 2]) * k;
2370  dst[4 * j ] = r * 257 >> 16;
2371  dst[4 * j + 1] = g * 257 >> 16;
2372  dst[4 * j + 2] = b * 257 >> 16;
2373  dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
2374  }
2375  src += stride;
2376  dst += p->linesize[plane];
2377  }
2378  av_freep(&five_planes);
2379  } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2380  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
2381  dst = p->data[plane];
2382  for (i = 0; i < s->height; i++) {
2383  for (j = 0; j < s->width; j++) {
2384  uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2385  uint64_t r = (65535 - AV_RB16(dst + 8 * j )) * k;
2386  uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2387  uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2388  AV_WB16(dst + 8 * j , r * 65537 >> 32);
2389  AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2390  AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2391  AV_WB16(dst + 8 * j + 6, 65535);
2392  }
2393  dst += p->linesize[plane];
2394  }
2395  }
2396  }
2397 
2398  if (s->planar && s->bppcount > 2) {
2399  FFSWAP(uint8_t*, p->data[0], p->data[2]);
2400  FFSWAP(int, p->linesize[0], p->linesize[2]);
2401  FFSWAP(uint8_t*, p->data[0], p->data[1]);
2402  FFSWAP(int, p->linesize[0], p->linesize[1]);
2403  }
2404 
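  /* Non-DNG Bayer data: rescale raw samples so that white_level maps to the
     full 16-bit range, clamping at 65535. */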
2405  if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
2406  uint16_t *dst = (uint16_t *)p->data[0];
2407  for (i = 0; i < s->height; i++) {
2408  for (j = 0; j < s->width; j++)
2409  dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
2410  dst += stride / 2;
2411  }
2412  }
2413 
2414  ret = ff_exif_attach_ifd(avctx, p, &s->exif_meta);
2415  if (ret < 0)
2416  av_log(avctx, AV_LOG_ERROR, "error attaching EXIF ifd: %s\n", av_err2str(ret));
2417 
2418  *got_frame = 1;
2419 
2420  return avpkt->size;
2421 }
2422 
 2423 static av_cold int tiff_init(AVCodecContext *avctx)
 2424 {
2425  TiffContext *s = avctx->priv_data;
2426  int ret;
2427 
2428  s->width = 0;
2429  s->height = 0;
2430  s->subsampling[0] =
2431  s->subsampling[1] = 1;
2432  s->avctx = avctx;
2433  ff_lzw_decode_open(&s->lzw);
2434  if (!s->lzw)
2435  return AVERROR(ENOMEM);
 2436  ff_ccitt_unpack_init();
 2437 
2438  /* Allocate JPEG frame */
2439  s->jpgframe = av_frame_alloc();
2440  s->jpkt = av_packet_alloc();
2441  if (!s->jpgframe || !s->jpkt)
2442  return AVERROR(ENOMEM);
2443 
2444  /* Prepare everything needed for JPEG decoding */
2446  s->avctx_mjpeg = avcodec_alloc_context3(&ff_mjpeg_decoder.p);
2447  if (!s->avctx_mjpeg)
2448  return AVERROR(ENOMEM);
2449  s->avctx_mjpeg->flags = avctx->flags;
2450  s->avctx_mjpeg->flags2 = avctx->flags2;
2451  s->avctx_mjpeg->idct_algo = avctx->idct_algo;
2452  s->avctx_mjpeg->max_pixels = avctx->max_pixels;
2453  ret = avcodec_open2(s->avctx_mjpeg, NULL, NULL);
2454  if (ret < 0) {
2455  return ret;
2456  }
2457 
2458  return 0;
2459 }
2460 
2461 static av_cold int tiff_end(AVCodecContext *avctx)
2462 {
2463  TiffContext *const s = avctx->priv_data;
2464 
2465  free_geotags(s);
2466  av_exif_free(&s->exif_meta);
2467 
2468  ff_lzw_decode_close(&s->lzw);
2469  av_freep(&s->deinvert_buf);
2470  s->deinvert_buf_size = 0;
2471  av_freep(&s->yuv_line);
2472  s->yuv_line_size = 0;
2473  av_frame_free(&s->jpgframe);
2474  av_packet_free(&s->jpkt);
2475  avcodec_free_context(&s->avctx_mjpeg);
2476  return 0;
2477 }
2478 
2479 #define OFFSET(x) offsetof(TiffContext, x)
2480 static const AVOption tiff_options[] = {
2481  { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2482  { "thumbnail", "decode embedded thumbnail subimage instead if available", OFFSET(get_thumbnail), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2483  { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2484  { NULL },
2485 };
2486 
2487 static const AVClass tiff_decoder_class = {
2488  .class_name = "TIFF decoder",
2489  .item_name = av_default_item_name,
2490  .option = tiff_options,
2491  .version = LIBAVUTIL_VERSION_INT,
2492 };
2493 
 2494 const FFCodec ff_tiff_decoder = {
 2495  .p.name = "tiff",
2496  CODEC_LONG_NAME("TIFF image"),
2497  .p.type = AVMEDIA_TYPE_VIDEO,
2498  .p.id = AV_CODEC_ID_TIFF,
2499  .priv_data_size = sizeof(TiffContext),
2500  .init = tiff_init,
2501  .close = tiff_end,
 2502  FF_CODEC_DECODE_CB(decode_frame),
 2503  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
 2504  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_ICC_PROFILES |
 2505  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
 2506  .p.priv_class = &tiff_decoder_class,
2507 };
TiffContext::tiff_type
enum TiffType tiff_type
Definition: tiff.c:74
AVFrame::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:682
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
ff_tadd_string_metadata
int ff_tadd_string_metadata(int count, const char *name, GetByteContext *gb, int le, AVDictionary **metadata)
Adds a string of count characters into the metadata dictionary.
Definition: tiff_common.c:209
TiffContext::gb
GetByteContext gb
Definition: tiff.c:63
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
TIFF_GEOG_LINEAR_UNITS_GEOKEY
@ TIFF_GEOG_LINEAR_UNITS_GEOKEY
Definition: tiff.h:147
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
ff_tiff_decoder
const FFCodec ff_tiff_decoder
Definition: tiff.c:2494
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
bytestream2_get_eof
static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
Definition: bytestream.h:332
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
DNG_AS_SHOT_WHITE_XY
@ DNG_AS_SHOT_WHITE_XY
Definition: tiff.h:112
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:140
get_geokey_type
static int get_geokey_type(int key)
Definition: tiff.c:162
tiff_decode_tag
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
Definition: tiff.c:1253
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
av_exif_parse_buffer
int av_exif_parse_buffer(void *logctx, const uint8_t *buf, size_t size, AVExifMetadata *ifd, enum AVExifHeaderMode header_mode)
Decodes the EXIF data provided in the buffer and writes it into the struct *ifd.
Definition: exif.c:748
DNG_COLOR_MATRIX2
@ DNG_COLOR_MATRIX2
Definition: tiff.h:107
elements
static const ElemCat * elements[ELEMENT_COUNT]
Definition: signature.h:565
TIFF_PHOTOMETRIC_ICC_LAB
@ TIFF_PHOTOMETRIC_ICC_LAB
Definition: tiff.h:198
TIFF_JPEG
@ TIFF_JPEG
Definition: tiff.h:131
TiffContext::exif_meta
AVExifMetadata exif_meta
Definition: tiff.c:129
GetByteContext
Definition: bytestream.h:33
AVExifMetadata
Definition: exif.h:76
AV_PIX_FMT_GBRP16BE
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
Definition: pixfmt.h:171
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3441
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
TiffContext::dng_lut
uint16_t dng_lut[65536]
Definition: tiff.c:104
camera_xyz_coeff
static void camera_xyz_coeff(TiffContext *s, float rgb2cam[3][4], double cam2xyz[4][3])
Definition: tiff.c:1902
AVCOL_TRC_LINEAR
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
Definition: pixfmt.h:670
TiffContext::strippos
int strippos
Definition: tiff.c:111
TIFF_CFA_PATTERN_DIM
@ TIFF_CFA_PATTERN_DIM
Definition: tiff.h:87
TIFF_PROJ_COORD_TRANS_GEOKEY
@ TIFF_PROJ_COORD_TRANS_GEOKEY
Definition: tiff.h:160
OFFSET
#define OFFSET(x)
Definition: tiff.c:2479
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1398
TiffContext::sot
int sot
Definition: tiff.c:110
int64_t
long long int64_t
Definition: coverity.c:34
doubles2str
static char * doubles2str(double *dp, int count, const char *sep)
Definition: tiff.c:249
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
tiff_projection_codes
static const TiffGeoTagKeyName tiff_projection_codes[]
Definition: tiff_data.h:1536
TIFF_CCITT_RLE
@ TIFF_CCITT_RLE
Definition: tiff.h:127
TIFF_GEOG_AZIMUTH_UNITS_GEOKEY
@ TIFF_GEOG_AZIMUTH_UNITS_GEOKEY
Definition: tiff.h:155
av_unused
#define av_unused
Definition: attributes.h:131
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:63
mjpegdec.h
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
AV_PIX_FMT_RGBA64BE
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:202
tiff_end
static av_cold int tiff_end(AVCodecContext *avctx)
Definition: tiff.c:2461
AV_PIX_FMT_GBRAPF32LE
@ AV_PIX_FMT_GBRAPF32LE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian.
Definition: pixfmt.h:344
w
uint8_t w
Definition: llviddspenc.c:38
TiffContext::tile_offsets_offset
int tile_offsets_offset
Definition: tiff.c:116
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
TIFF_ADOBE_DEFLATE
@ TIFF_ADOBE_DEFLATE
Definition: tiff.h:133
AV_PIX_FMT_GBRPF32BE
@ AV_PIX_FMT_GBRPF32BE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
Definition: pixfmt.h:341
TIFF_COPYRIGHT
@ TIFF_COPYRIGHT
Definition: tiff.h:89
AVPacket::data
uint8_t * data
Definition: packet.h:552
TIFF_PHOTOMETRIC_ITU_LAB
@ TIFF_PHOTOMETRIC_ITU_LAB
Definition: tiff.h:199
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:42
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
RET_GEOKEY_VAL
#define RET_GEOKEY_VAL(TYPE, array)
TIFF_NEWJPEG
@ TIFF_NEWJPEG
Definition: tiff.h:132
FFCodec
Definition: codec_internal.h:127
float.h
deinvert_buffer
static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
Definition: tiff.c:445
reverse.h
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
ff_lzw_decode
int ff_lzw_decode(LZWState *p, uint8_t *buf, int len)
Decode given number of bytes NOTE: the algorithm here is inspired from the LZW GIF decoder written by...
Definition: lzw.c:169
TIFF_ROWSPERSTRIP
@ TIFF_ROWSPERSTRIP
Definition: tiff.h:58
TiffContext::pattern
uint8_t pattern[4]
Definition: tiff.c:93
TIFF_GEOG_ELLIPSOID_GEOKEY
@ TIFF_GEOG_ELLIPSOID_GEOKEY
Definition: tiff.h:151
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
TIFF_GEO_KEY_USER_DEFINED
#define TIFF_GEO_KEY_USER_DEFINED
Definition: tiff_data.h:120
TIFF_PROJECTION_GEOKEY
@ TIFF_PROJECTION_GEOKEY
Definition: tiff.h:159
TIFF_PROJ_LINEAR_UNITS_GEOKEY
@ TIFF_PROJ_LINEAR_UNITS_GEOKEY
Definition: tiff.h:161
TIFF_RAW
@ TIFF_RAW
Definition: tiff.h:126
ff_lzw_decode_close
av_cold void ff_lzw_decode_close(LZWState **p)
Definition: lzw.c:118
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
TIFF_GEO_DOUBLE_PARAMS
@ TIFF_GEO_DOUBLE_PARAMS
Definition: tiff.h:95
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
AV_PIX_FMT_BAYER_GRBG16
#define AV_PIX_FMT_BAYER_GRBG16
Definition: pixfmt.h:574
TiffGeoTagKeyName
Definition: tiff.h:220
TIFF_PHOTOMETRIC_WHITE_IS_ZERO
@ TIFF_PHOTOMETRIC_WHITE_IS_ZERO
Definition: tiff.h:190
thread.h
TIFF_PACKBITS
@ TIFF_PACKBITS
Definition: tiff.h:134
TIFF_GEOG_PRIME_MERIDIAN_GEOKEY
@ TIFF_GEOG_PRIME_MERIDIAN_GEOKEY
Definition: tiff.h:146
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:75
TiffContext::is_jpeg
int is_jpeg
Definition: tiff.c:119
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
dng_process_color16
static uint16_t av_always_inline dng_process_color16(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
Definition: tiff.c:289
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
TIFF_GEO_KEY_UNDEFINED
#define TIFF_GEO_KEY_UNDEFINED
Definition: tiff_data.h:119
tiff_options
static const AVOption tiff_options[]
Definition: tiff.c:2480
TiffContext::get_thumbnail
int get_thumbnail
Definition: tiff.c:72
TIFF_PHOTOMETRIC_LINEAR_RAW
@ TIFF_PHOTOMETRIC_LINEAR_RAW
Definition: tiff.h:203
TIFF_FILL_ORDER
@ TIFF_FILL_ORDER
Definition: tiff.h:51
TIFF_PHOTOMETRIC_ALPHA_MASK
@ TIFF_PHOTOMETRIC_ALPHA_MASK
Definition: tiff.h:194
TiffContext::deinvert_buf_size
int deinvert_buf_size
Definition: tiff.c:122
AV_PIX_FMT_GRAY16BE
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
Definition: pixfmt.h:104
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:135
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
AV_TIFF_SHORT
@ AV_TIFF_SHORT
Definition: exif.h:45
TIFF_DATE
@ TIFF_DATE
Definition: tiff.h:72
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:333
TIFF_TILE_BYTE_COUNTS
@ TIFF_TILE_BYTE_COUNTS
Definition: tiff.h:80
ff_ccitt_unpack
int ff_ccitt_unpack(AVCodecContext *avctx, const uint8_t *src, int srcsize, uint8_t *dst, int height, int stride, enum TiffCompr compr, int opts)
unpack data compressed with CCITT Group 3 1/2-D or Group 4 method
Definition: faxcompr.c:393
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
unpack_yuv
static void unpack_yuv(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum)
Definition: tiff.c:472
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
tiff_set_type
static void tiff_set_type(TiffContext *s, enum TiffType tiff_type)
Definition: tiff.c:134
dng_decode_tiles
static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, const AVPacket *avpkt)
Definition: tiff.c:972
inflate
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
Definition: vf_neighbor.c:194
TIFF_YCBCR_SUBSAMPLING
@ TIFF_YCBCR_SUBSAMPLING
Definition: tiff.h:84
TIFF_MAKE
@ TIFF_MAKE
Definition: tiff.h:54
GetBitContext
Definition: get_bits.h:109
TIFF_GEOG_GEODETIC_DATUM_GEOKEY
@ TIFF_GEOG_GEODETIC_DATUM_GEOKEY
Definition: tiff.h:145
TiffContext::deinvert_buf
uint8_t * deinvert_buf
Definition: tiff.c:121
av_exif_free
void av_exif_free(AVExifMetadata *ifd)
Frees all resources associated with the given EXIF metadata struct.
Definition: exif.c:590
TiffContext::tile_length
int tile_length
Definition: tiff.c:117
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
TIFF_T6OPTIONS
@ TIFF_T6OPTIONS
Definition: tiff.h:68
val
static double val(void *priv, double ch)
Definition: aeval.c:77
horizontal_fill
static void av_always_inline horizontal_fill(TiffContext *s, unsigned int bpp, uint8_t *dst, int usePtr, const uint8_t *src, uint8_t c, int width, int offset)
Definition: tiff.c:389
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
TiffContext::color_matrix
float color_matrix[3][4]
Definition: tiff.c:98
TIFF_VERTICAL_CS_TYPE_GEOKEY
@ TIFF_VERTICAL_CS_TYPE_GEOKEY
Definition: tiff.h:181
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:522
TIFF_SOFTWARE_NAME
@ TIFF_SOFTWARE_NAME
Definition: tiff.h:71
FF_LZW_TIFF
@ FF_LZW_TIFF
Definition: lzw.h:39
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
TiffContext::as_shot_neutral
float as_shot_neutral[4]
Definition: tiff.c:96
AVCOL_TRC_GAMMA22
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:666
TiffContext::geotags
TiffGeoTag * geotags
Definition: tiff.c:127
DNG_LINEARIZATION_TABLE
@ DNG_LINEARIZATION_TABLE
Definition: tiff.h:103
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:79
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:51
get_geokey_val
static const char * get_geokey_val(int key, uint16_t val)
Definition: tiff.c:190
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
TiffGeoTag
Definition: tiff.h:212
TIFF_GRAY_RESPONSE_CURVE
@ TIFF_GRAY_RESPONSE_CURVE
Definition: tiff.h:66
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
TiffContext::rps
int rps
Definition: tiff.c:109
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:539
TIFF_SUBFILE
@ TIFF_SUBFILE
Definition: tiff.h:45
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
TiffContext::premultiply
float premultiply[4]
Definition: tiff.c:100
TiffContext::camera_calibration
float camera_calibration[4][4]
Definition: tiff.c:99
CINEMADNG_T_STOP
@ CINEMADNG_T_STOP
Definition: tiff.h:119
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
float
float
Definition: af_crystalizer.c:122
AV_PIX_FMT_GBRAP16BE
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
Definition: pixfmt.h:213
TiffContext::stripsize
int stripsize
Definition: tiff.c:111
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
attributes_internal.h
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:341
tiff_proj_cs_type_codes
static const TiffGeoTagKeyName tiff_proj_cs_type_codes[]
Definition: tiff_data.h:559
intreadwrite.h
TIFF_G4
@ TIFF_G4
Definition: tiff.h:129
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRP16LE
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
Definition: pixfmt.h:172
TiffContext::width
int width
Definition: tiff.c:75
AV_PIX_FMT_BAYER_BGGR8
@ AV_PIX_FMT_BAYER_BGGR8
bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
Definition: pixfmt.h:285
g
const char * g
Definition: vf_curves.c:128
AV_TIFF_RATIONAL
@ AV_TIFF_RATIONAL
Definition: exif.h:47
TiffType
TiffType
TIFF types in ascenting priority (last in the list is highest)
Definition: tiff.h:34
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1048
ff_lzw_decode_open
av_cold void ff_lzw_decode_open(LZWState **p)
Definition: lzw.c:113
TIFF_STRIP_SIZE
@ TIFF_STRIP_SIZE
Definition: tiff.h:59
fminf
float fminf(float, float)
avcodec_receive_frame
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
Definition: avcodec.c:707
TiffContext::yuv_line
uint8_t * yuv_line
Definition: tiff.c:123
TIFF_GEOGRAPHIC_TYPE_GEOKEY
@ TIFF_GEOGRAPHIC_TYPE_GEOKEY
Definition: tiff.h:143
dng_decode_jpeg
static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame, int tile_byte_count, int dst_x, int dst_y, int w, int h)
Definition: tiff.c:653
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
TIFF_PHOTOMETRIC_LOG_L
@ TIFF_PHOTOMETRIC_LOG_L
Definition: tiff.h:201
TiffContext::use_color_matrix
int use_color_matrix
Definition: tiff.c:92
ff_tadd_shorts_metadata
int ff_tadd_shorts_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, int is_signed, AVDictionary **metadata)
Adds count shorts converted to a string into the metadata dictionary.
Definition: tiff_common.c:166
channels
channels
Definition: aptx.h:31
decode.h
get_bits.h
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
TiffContext::get_page
uint16_t get_page
Definition: tiff.c:71
LZWState
Definition: lzw.c:46
TIFF_IMAGE_DESCRIPTION
@ TIFF_IMAGE_DESCRIPTION
Definition: tiff.h:53
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:1782
TiffContext::is_bayer
int is_bayer
Definition: tiff.c:91
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
key
const char * key
Definition: hwcontext_opencl.c:189
TiffContext::jpgframe
AVFrame * jpgframe
Definition: tiff.c:68
TiffContext::compr
enum TiffCompr compr
Definition: tiff.c:80
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:326
TiffContext::photometric
enum TiffPhotometric photometric
Definition: tiff.c:81
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:100
EXTERN
#define EXTERN
Definition: attributes_internal.h:34
search_keyval
static const char * search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
Definition: tiff.c:181
AV_PIX_FMT_BAYER_RGGB8
@ AV_PIX_FMT_BAYER_RGGB8
bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
Definition: pixfmt.h:286
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
AV_PIX_FMT_BAYER_BGGR16
#define AV_PIX_FMT_BAYER_BGGR16
Definition: pixfmt.h:571
if
if(ret)
Definition: filter_design.txt:179
dng_process_color8
static uint16_t av_always_inline dng_process_color8(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
Definition: tiff.c:308
ff_ccitt_unpack_init
av_cold void ff_ccitt_unpack_init(void)
initialize unpacker code
Definition: faxcompr.c:119
TiffContext::geotag_count
int geotag_count
Definition: tiff.c:126
TiffContext::height
int height
Definition: tiff.c:75
TIFF_PAGE_NAME
@ TIFF_PAGE_NAME
Definition: tiff.h:63
TIFF_VERTICAL_UNITS_GEOKEY
@ TIFF_VERTICAL_UNITS_GEOKEY
Definition: tiff.h:184
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
TIFF_LZW
@ TIFF_LZW
Definition: tiff.h:130
tiff_init
static av_cold int tiff_init(AVCodecContext *avctx)
Definition: tiff.c:2423
TiffContext::as_shot_white
float as_shot_white[4]
Definition: tiff.c:97
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_tget_short
unsigned ff_tget_short(GetByteContext *gb, int le)
Reads a short from the bytestream using given endianness.
Definition: tiff_common.c:45
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
AV_PIX_FMT_GBRAPF32BE
@ AV_PIX_FMT_GBRAPF32BE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian.
Definition: pixfmt.h:343
NULL
#define NULL
Definition: coverity.c:32
exif_internal.h
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
TIFF_PHOTOMETRIC_YCBCR
@ TIFF_PHOTOMETRIC_YCBCR
Definition: tiff.h:196
TiffContext
Definition: tiff.c:60
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:401
TiffContext::is_thumbnail
int is_thumbnail
Definition: tiff.c:88
tiff_data.h
TiffContext::avctx
AVCodecContext * avctx
Definition: tiff.c:62
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AV_PIX_FMT_RGB48LE
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:110
AV_PIX_FMT_YA16LE
@ AV_PIX_FMT_YA16LE
16 bits gray, 16 bits alpha (little-endian)
Definition: pixfmt.h:210
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:83
tiff.h
TIFF_PHOTOMETRIC_PALETTE
@ TIFF_PHOTOMETRIC_PALETTE
Definition: tiff.h:193
tiff_common.h
TiffContext::get_subimage
int get_subimage
Definition: tiff.c:70
DNG_AS_SHOT_NEUTRAL
@ DNG_AS_SHOT_NEUTRAL
Definition: tiff.h:111
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:241
AV_PIX_FMT_RGBA64LE
@ AV_PIX_FMT_RGBA64LE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:203
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
TIFF_MODEL_TIEPOINT
@ TIFF_MODEL_TIEPOINT
Definition: tiff.h:90
TIFF_PHOTOMETRIC_CIE_LAB
@ TIFF_PHOTOMETRIC_CIE_LAB
Definition: tiff.h:197
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
TiffContext::black_level
float black_level[4]
Definition: tiff.c:101
AV_PIX_FMT_BAYER_GBRG16
#define AV_PIX_FMT_BAYER_GBRG16
Definition: pixfmt.h:573
MJpegDecodeContext
Definition: mjpegdec.h:55
TIFF_PAL
@ TIFF_PAL
Definition: tiff.h:76
RET_GEOKEY_TYPE
#define RET_GEOKEY_TYPE(TYPE, array)
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
TIFF_ARTIST
@ TIFF_ARTIST
Definition: tiff.h:73
CINEMADNG_TIME_CODES
@ CINEMADNG_TIME_CODES
Definition: tiff.h:117
TIFF_SAMPLES_PER_PIXEL
@ TIFF_SAMPLES_PER_PIXEL
Definition: tiff.h:57
TIFF_G3
@ TIFF_G3
Definition: tiff.h:128
TIFF_WIDTH
@ TIFF_WIDTH
Definition: tiff.h:46
TIFF_TILE_OFFSETS
@ TIFF_TILE_OFFSETS
Definition: tiff.h:79
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
error.h
TiffContext::palette
uint32_t palette[256]
Definition: tiff.c:77
PutByteContext
Definition: bytestream.h:37
ff_tread_tag
int ff_tread_tag(GetByteContext *gb, int le, unsigned *tag, unsigned *type, unsigned *count, int *next)
Reads the first 3 fields of a TIFF tag, which are the tag id, the tag type and the count of values fo...
Definition: tiff_common.c:254
AV_TIFF_BYTE
@ AV_TIFF_BYTE
Definition: exif.h:43
AV_PIX_FMT_RGBF32BE
@ AV_PIX_FMT_RGBF32BE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., big-endian.
Definition: pixfmt.h:420
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:495
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:553
TIFF_TYPE_CINEMADNG
@ TIFF_TYPE_CINEMADNG
Digital Negative (DNG) image part of an CinemaDNG image sequence.
Definition: tiff.h:40
height
#define height
Definition: dsp.h:89
codec_internal.h
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
TiffContext::analog_balance
float analog_balance[4]
Definition: tiff.c:95
lzw.h
LZW decoding routines.
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
DNG_CAMERA_CALIBRATION1
@ DNG_CAMERA_CALIBRATION1
Definition: tiff.h:108
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
bps
unsigned bps
Definition: movenc.c:1958
AV_PIX_FMT_YA16BE
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:209
TIFF_GEO_ASCII_PARAMS
@ TIFF_GEO_ASCII_PARAMS
Definition: tiff.h:96
size
int size
Definition: twinvq_data.h:10344
xyz2rgb
static const float xyz2rgb[3][3]
Definition: tiff.c:1896
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2029
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
TiffContext::bpp
unsigned int bpp
Definition: tiff.c:76
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
TIFF_GT_MODEL_TYPE_GEOKEY
@ TIFF_GT_MODEL_TYPE_GEOKEY
Definition: tiff.h:140
TiffContext::jpkt
AVPacket * jpkt
Definition: tiff.c:67
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
TIFF_DOCUMENT_NAME
@ TIFF_DOCUMENT_NAME
Definition: tiff.h:52
TiffContext::fill_order
int fill_order
Definition: tiff.c:86
TIFF_MODEL_TRANSFORMATION
@ TIFF_MODEL_TRANSFORMATION
Definition: tiff.h:92
TIFF_TILE_LENGTH
@ TIFF_TILE_LENGTH
Definition: tiff.h:78
TIFF_MODEL
@ TIFF_MODEL
Definition: tiff.h:55
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:408
TiffContext::white_level
unsigned white_level
Definition: tiff.c:103
TiffContext::stripsizesoff
int stripsizesoff
Definition: tiff.c:111
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:64
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:263
AV_TIFF_STRING
@ AV_TIFF_STRING
Definition: exif.h:44
TiffContext::planar
int planar
Definition: tiff.c:82
TIFF_COMPR
@ TIFF_COMPR
Definition: tiff.h:49
TIFF_HEIGHT
@ TIFF_HEIGHT
Definition: tiff.h:47
cmp_id_key
static int cmp_id_key(const void *id, const void *k)
Definition: tiff.c:176
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
tiff_decoder_class
static const AVClass tiff_decoder_class
Definition: tiff.c:2487
planes
static const struct @513 planes[]
DNG_BLACK_LEVEL
@ DNG_BLACK_LEVEL
Definition: tiff.h:104
TIFF_T4OPTIONS
@ TIFF_T4OPTIONS
Definition: tiff.h:67
TIFF_PHOTOMETRIC_LOG_LUV
@ TIFF_PHOTOMETRIC_LOG_LUV
Definition: tiff.h:202
TiffContext::le
int le
Definition: tiff.c:79
CINEMADNG_REEL_NAME
@ CINEMADNG_REEL_NAME
Definition: tiff.h:120
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:701
TiffContext::subsampling
int subsampling[2]
Definition: tiff.c:83
TIFF_PAGE_NUMBER
@ TIFF_PAGE_NUMBER
Definition: tiff.h:70
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: tiff.c:1928
AV_PIX_FMT_RGB48BE
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:109
TIFF_PHOTOMETRIC_CFA
@ TIFF_PHOTOMETRIC_CFA
Definition: tiff.h:200
AV_TIFF_SRATIONAL
@ AV_TIFF_SRATIONAL
Definition: exif.h:52
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
ff_tget_long
unsigned ff_tget_long(GetByteContext *gb, int le)
Reads a long from the bytestream using given endianness.
Definition: tiff_common.c:51
TIFF_PHOTOMETRIC_BLACK_IS_ZERO
@ TIFF_PHOTOMETRIC_BLACK_IS_ZERO
Definition: tiff.h:191
TiffContext::tile_width
int tile_width
Definition: tiff.c:117
TiffContext::fax_opts
int fax_opts
Definition: tiff.c:84
ff_lzw_decode_init
int ff_lzw_decode_init(LZWState *p, int csize, const uint8_t *buf, int buf_size, int mode)
Initialize LZW decoder.
Definition: lzw.c:131
TiffContext::bppcount
unsigned int bppcount
Definition: tiff.c:76
unpack_gray
static void unpack_gray(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum, int width, int bpp)
Definition: tiff.c:458
TiffContext::res
uint32_t res[4]
Definition: tiff.c:87
TIFF_MODEL_PIXEL_SCALE
@ TIFF_MODEL_PIXEL_SCALE
Definition: tiff.h:91
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
TIFF_PLANAR
@ TIFF_PLANAR
Definition: tiff.h:62
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
AV_PIX_FMT_BAYER_GBRG8
@ AV_PIX_FMT_BAYER_GBRG8
bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
Definition: pixfmt.h:287
TIFF_TYPE_TIFF
@ TIFF_TYPE_TIFF
TIFF image based on the TIFF 6.0 or TIFF/EP (ISO 12234-2) specifications.
Definition: tiff.h:36
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
MJpegDecodeContext::bayer
int bayer
Definition: mjpegdec.h:76
AV_OPT_FLAG_VIDEO_PARAM
#define AV_OPT_FLAG_VIDEO_PARAM
Definition: opt.h:358
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:494
ff_exif_attach_ifd
int ff_exif_attach_ifd(void *logctx, AVFrame *frame, const AVExifMetadata *ifd)
Attach an already-parsed EXIF metadata struct to the frame as a side data buffer.
Definition: exif.c:1234
AVCodecContext::idct_algo
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
Definition: avcodec.h:1526
TIFF_TYPE_DNG
@ TIFF_TYPE_DNG
Digital Negative (DNG) image.
Definition: tiff.h:38
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
DNG_VERSION
@ DNG_VERSION
Definition: tiff.h:101
TiffContext::stripoff
int stripoff
Definition: tiff.c:111
len
int len
Definition: vorbis_enc_data.h:426
AV_PIX_FMT_GBRPF32LE
@ AV_PIX_FMT_GBRPF32LE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
Definition: pixfmt.h:342
TIFF_PHOTOMETRIC_NONE
@ TIFF_PHOTOMETRIC_NONE
Definition: tiff.h:189
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
TIFF_CFA_PATTERN
@ TIFF_CFA_PATTERN
Definition: tiff.h:88
TIFF_STRIP_OFFS
@ TIFF_STRIP_OFFS
Definition: tiff.h:56
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
TIFF_TILE_WIDTH
@ TIFF_TILE_WIDTH
Definition: tiff.h:77
avcodec.h
stride
#define stride
Definition: h264pred_template.c:536
AV_PIX_FMT_GBRAP16LE
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
Definition: pixfmt.h:214
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
tag
uint32_t tag
Definition: movenc.c:1957
ret
ret
Definition: filter_design.txt:187
TIFF_HOST_COMPUTER
@ TIFF_HOST_COMPUTER
Definition: tiff.h:74
DNG_WHITE_LEVEL
@ DNG_WHITE_LEVEL
Definition: tiff.h:105
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
TiffContext::palette_is_set
int palette_is_set
Definition: tiff.c:78
TIFF_BPP
@ TIFF_BPP
Definition: tiff.h:48
d65_white
static const float d65_white[3]
Definition: tiff.c:132
pos
unsigned int pos
Definition: spdifenc.c:414
get_geokey_name
static const char * get_geokey_name(int key)
Definition: tiff.c:147
TIFF_PHOTOMETRIC
@ TIFF_PHOTOMETRIC
Definition: tiff.h:50
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
ff_tget_double
double ff_tget_double(GetByteContext *gb, int le)
Reads a double from the bytestream using given endianness.
Definition: tiff_common.c:57
TiffPhotometric
TiffPhotometric
list of TIFF, TIFF/AP and DNG PhotometricInterpretation (TIFF_PHOTOMETRIC) values
Definition: tiff.h:188
TiffContext::last_tag
unsigned last_tag
Definition: tiff.c:89
AVCodecContext
main external API structure.
Definition: avcodec.h:431
ADD_METADATA
#define ADD_METADATA(count, name, sep)
AV_PIX_FMT_RGBAF32BE
@ AV_PIX_FMT_RGBAF32BE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., big-endian.
Definition: pixfmt.h:423
TiffContext::sstype
int sstype
Definition: tiff.c:109
again
Notes from the libavfilter design document on format negotiation: once a single format is chosen for a link, references to the supported-format lists are replaced with it.
Definition: filter_design.txt:25
TIFF_PREDICTOR
@ TIFF_PREDICTOR
Definition: tiff.h:75
bytestream2_seek_p
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
Definition: bytestream.h:236
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:705
TiffContext::lzw
LZWState * lzw
Definition: tiff.c:112
set_sar
static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
Definition: tiff.c:1234
TIFF_LZMA
@ TIFF_LZMA
Definition: tiff.h:136
tiff_unpack_fax
static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride, const uint8_t *src, int size, int width, int lines)
Definition: tiff.c:632
TIFF_GEO_KEY_DIRECTORY
@ TIFF_GEO_KEY_DIRECTORY
Definition: tiff.h:94
CINEMADNG_CAMERA_LABEL
@ CINEMADNG_CAMERA_LABEL
Definition: tiff.h:121
AV_TIFF_DOUBLE
@ AV_TIFF_DOUBLE
Definition: exif.h:54
TiffContext::is_tiled
int is_tiled
Definition: tiff.c:115
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:229
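A minimal sketch of the usual calling pattern (open_ifd is an illustrative wrapper): parse the byte-order header, then seek the GetByteContext to the first IFD.

static int open_ifd(GetByteContext *gb, int *le)
{
    int ifd_offset, ret;

    ret = ff_tdecode_header(gb, le, &ifd_offset);
    if (ret < 0)                      /* not a valid TIFF header */
        return ret;
    bytestream2_seek(gb, ifd_offset, SEEK_SET);
    return 0;
}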
AV_PIX_FMT_RGBF32LE
@ AV_PIX_FMT_RGBF32LE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., little-endian.
Definition: pixfmt.h:421
RET_GEOKEY_STR
#define RET_GEOKEY_STR(TYPE, array)
TIFF_YRES
@ TIFF_YRES
Definition: tiff.h:61
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
av_clip_uint16
#define av_clip_uint16
Definition: common.h:112
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
TIFF_ICC_PROFILE
@ TIFF_ICC_PROFILE
Definition: tiff.h:93
faxcompr.h
DNG_CAMERA_CALIBRATION2
@ DNG_CAMERA_CALIBRATION2
Definition: tiff.h:109
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
A generic parameter which can be set by the user for demuxing or decoding.
Definition: opt.h:356
desc
const char * desc
Definition: libsvtav1.c:79
AV_PIX_FMT_RGBAF32LE
@ AV_PIX_FMT_RGBAF32LE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., little-endian.
Definition: pixfmt.h:424
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
bytestream2_get_bufferu
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:277
init_image
static int init_image(TiffContext *s, AVFrame *frame)
Definition: tiff.c:1045
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
free_geotags
static void free_geotags(TiffContext *const s)
Definition: tiff.c:139
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
TIFF_DEFLATE
@ TIFF_DEFLATE
Definition: tiff.h:135
TIFF_PHOTOMETRIC_RGB
@ TIFF_PHOTOMETRIC_RGB
Definition: tiff.h:192
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVPacket
This structure stores compressed data.
Definition: packet.h:529
TIFF_SUB_IFDS
@ TIFF_SUB_IFDS
Definition: tiff.h:81
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
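Tying the AV_OPT_TYPE_INT, AV_OPT_TYPE_BOOL and AV_OPT_FLAG_DECODING_PARAM entries together, a minimal sketch of how an int and a bool decoding option are declared; DummyContext, the option names and their ranges are illustrative, not the decoder's real option table.

#include <stddef.h>
#include <stdint.h>
#include "libavutil/opt.h"

typedef struct DummyContext {
    const AVClass *class;
    int page;
    int thumbnail;
} DummyContext;

#define OFFSET(x) offsetof(DummyContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

static const AVOption dummy_options[] = {
    { "page",      "IFD/page index to decode",  OFFSET(page),      AV_OPT_TYPE_INT,  { .i64 = 0 }, 0, UINT16_MAX, DEC },
    { "thumbnail", "decode embedded thumbnail", OFFSET(thumbnail), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1,          DEC },
    { NULL },
};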
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
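A minimal sketch of how a decoded string tag can end up in the output frame's dictionary (set_frame_tag and the key passed by the caller are illustrative): av_dict_set() duplicates both key and value by default and returns a negative error code on failure.

#include "libavutil/dict.h"
#include "libavutil/frame.h"

static int set_frame_tag(AVFrame *frame, const char *key, const char *value)
{
    return av_dict_set(&frame->metadata, key, value, 0);
}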
dng_blit
static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int width, int height, int is_single_comp, int is_u16, int odd_line)
Definition: tiff.c:316
tiff_unpack_strip
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride, const uint8_t *src, int size, int strip_start, int lines)
Definition: tiff.c:743
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
DNG_COLOR_MATRIX1
@ DNG_COLOR_MATRIX1
Definition: tiff.h:106
TiffContext::tile_byte_counts_offset
int tile_byte_counts_offset
Definition: tiff.c:116
ff_tadd_doubles_metadata
int ff_tadd_doubles_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, AVDictionary **metadata)
Adds count doubles converted to a string into the metadata dictionary.
Definition: tiff_common.c:145
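A minimal sketch of the calling convention documented above (add_double_list, the metadata key and the separator are illustrative): the helper formats 'count' doubles read from the bytestream into one dictionary entry, joining them with the given separator.

static int add_double_list(GetByteContext *gb, int le, int count,
                           AVDictionary **metadata)
{
    return ff_tadd_doubles_metadata(count, "GeoDoubleParams", ", ",
                                    gb, le, metadata);
}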
TiffContext::avctx_mjpeg
AVCodecContext * avctx_mjpeg
Definition: tiff.c:66
TIFF_XRES
@ TIFF_XRES
Definition: tiff.h:60
add_metadata
static int add_metadata(int count, int type, const char *name, const char *sep, TiffContext *s, AVFrame *frame)
Definition: tiff.c:275
bytestream.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
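Together with bytestream2_get_bufferu() above, a minimal sketch of the reader setup (copy_block is an illustrative helper): initialize the GetByteContext over the input buffer, verify enough bytes remain, then use the unchecked copy.

static int copy_block(const uint8_t *buf, int buf_size, uint8_t *dst, int size)
{
    GetByteContext gb;

    bytestream2_init(&gb, buf, buf_size);
    if (size < 0 || bytestream2_get_bytes_left(&gb) < size)
        return AVERROR_INVALIDDATA;
    bytestream2_get_bufferu(&gb, dst, size);   /* unchecked: bounds verified above */
    return 0;
}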
TiffCompr
TiffCompr
list of TIFF, TIFF/EP and DNG compression types
Definition: tiff.h:125
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
TIFF_GEOG_ANGULAR_UNITS_GEOKEY
@ TIFF_GEOG_ANGULAR_UNITS_GEOKEY
Definition: tiff.h:149
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
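A minimal sketch of the usual error-reporting pattern around the two entries above (check_bpp and its limit are illustrative): log through the codec context, then return AVERROR_INVALIDDATA so the caller discards the packet.

static int check_bpp(AVCodecContext *avctx, unsigned bpp)
{
    if (bpp > 128) {   /* illustrative sanity limit */
        av_log(avctx, AV_LOG_ERROR, "unsupported bits per sample: %u\n", bpp);
        return AVERROR_INVALIDDATA;
    }
    return 0;
}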
TiffContext::cur_page
uint16_t cur_page
Definition: tiff.c:107
h
h
Definition: vp9dsp_template.c:2070
AV_CODEC_ID_TIFF
@ AV_CODEC_ID_TIFF
Definition: codec_id.h:148
avstring.h
type_sizes
static const uint8_t type_sizes[14]
sizes of various TIFF field types (string size = 100)
Definition: tiff_common.h:37
width
#define width
Definition: dsp.h:89
AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:520
TiffContext::predictor
int predictor
Definition: tiff.c:85
AV_PIX_FMT_BAYER_RGGB16
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:572
AV_TIFF_LONG
@ AV_TIFF_LONG
Definition: exif.h:46
snprintf
#define snprintf
Definition: snprintf.h:34
ff_tget
unsigned ff_tget(GetByteContext *gb, int type, int le)
Reads a value of the given TIFF data type from the bytestream using given endianness.
Definition: tiff_common.c:64
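A minimal sketch combining this helper with ff_tget_double() (value_as_double is an illustrative helper; TIFF_DOUBLE is the DOUBLE field type from tiff_common.h): return one tag value as a double whether it is stored as an integer type or as an 8-byte DOUBLE.

static double value_as_double(GetByteContext *gb, int type, int le)
{
    if (type == TIFF_DOUBLE)
        return ff_tget_double(gb, le);
    return ff_tget(gb, type, le);   /* BYTE, SHORT or LONG, read as unsigned */
}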
TIFF_PHOTOMETRIC_SEPARATED
@ TIFF_PHOTOMETRIC_SEPARATED
Definition: tiff.h:195
TiffContext::strips
int strips
Definition: tiff.c:109
TIFF_PROJECTED_CS_TYPE_GEOKEY
@ TIFF_PROJECTED_CS_TYPE_GEOKEY
Definition: tiff.h:157
CINEMADNG_FRAME_RATE
@ CINEMADNG_FRAME_RATE
Definition: tiff.h:118
TiffContext::sub_ifd
uint32_t sub_ifd
Definition: tiff.c:106
AV_PIX_FMT_BAYER_GRBG8
@ AV_PIX_FMT_BAYER_GRBG8
bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
Definition: pixfmt.h:288
src
#define src
Definition: vp8dsp.c:248
line
Notes from the swscale documentation on how the vertical scaler pulls lines from a ring buffer.
Definition: swscale.txt:40
TiffContext::yuv_line_size
unsigned int yuv_line_size
Definition: tiff.c:124
AV_RB16
Read a 16-bit big-endian value (macro generated from the byte-access templates in bytestream.h).
Definition: bytestream.h:98
DNG_ANALOG_BALANCE
@ DNG_ANALOG_BALANCE
Definition: tiff.h:110
TIFF_GT_RASTER_TYPE_GEOKEY
@ TIFF_GT_RASTER_TYPE_GEOKEY
Definition: tiff.h:141