Go to the documentation of this file.
32 #define LZMA_API_STATIC
132 static const float d65_white[3] = { 0.950456f, 1.f, 1.088754f };
135 if (s->tiff_type < tiff_type)
136 s->tiff_type = tiff_type;
141 for (int i = 0; i < s->geotag_count; i++)
149 #define RET_GEOKEY_STR(TYPE, array)\
150 if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
151 key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
152 return tiff_##array##_name_type_string + tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].offset;
164 #define RET_GEOKEY_TYPE(TYPE, array)\
165 if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
166 key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
167 return tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].type;
195 return "User-Defined";
197 #define RET_GEOKEY_VAL(TYPE, array)\
198 if (val >= TIFF_##TYPE##_OFFSET &&\
199 val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_codes))\
200 return tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET];
253 uint64_t component_len;
254 if (!sep) sep = ", ";
255 component_len = 24LL + strlen(sep);
256 if (count >= (INT_MAX - 1)/component_len)
258 ap = av_malloc(component_len * count + 1);
263 for (i = 0; i < count; i++) {
264 unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
265 if(l >= component_len) {
271 ap0[strlen(ap0) - strlen(sep)] = '\0';
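/* A minimal standalone sketch of the formatting scheme used above: each double
 * is printed with "%.15g" into a fixed per-component budget of 24 bytes plus
 * the separator, and the trailing separator is trimmed at the end.  Plain
 * malloc() stands in for av_malloc(); the overflow guard mirrors the one on
 * line 256. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *join_doubles(const double *dp, int count, const char *sep)
{
    size_t component_len = 24 + strlen(sep);
    char *buf, *p;

    if (count <= 0 || (size_t)count >= (INT_MAX - 1) / component_len)
        return NULL;
    p = buf = malloc(component_len * count + 1);
    if (!buf)
        return NULL;
    for (int i = 0; i < count; i++) {
        int l = snprintf(p, component_len, "%.15g%s", dp[i], sep);
        if (l < 0 || (size_t)l >= component_len) {  /* component did not fit */
            free(buf);
            return NULL;
        }
        p += l;
    }
    buf[strlen(buf) - strlen(sep)] = '\0';          /* drop the trailing separator */
    return buf;
}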
301 value_norm = ((float)value - black_level) * scale_factor;
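/* A sketch of the complete linearization step that the expression above is
 * part of (cf. DNG specification, chapter 5): look the raw value up in the
 * optional LinearizationTable, subtract the black level, rescale to the full
 * 16-bit range and clamp.  The LUT handling and clamping here are an
 * illustration, not a verbatim copy of dng_process_color16(). */
#include <stdint.h>

static uint16_t linearize_sample(uint16_t value, const uint16_t *lut,
                                 float black_level, float scale_factor)
{
    if (lut)
        value = lut[value];                        /* optional linearization LUT */
    float value_norm = ((float)value - black_level) * scale_factor;
    if (value_norm < 0.f)     value_norm = 0.f;    /* clamp to [0, 65535] */
    if (value_norm > 65535.f) value_norm = 65535.f;
    return (uint16_t)(value_norm + 0.5f);
}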
318 int is_single_comp, int is_u16, int odd_line)
320 float scale_factor[4];
324 for (int i = 0; i < 4; i++)
325 scale_factor[i] = s->premultiply[s->pattern[i]] * 65535.f / (s->white_level - s->black_level[i]);
327 for (int i = 0; i < 4; i++)
328 scale_factor[i] = s->premultiply[i] * 65535.f / (s->white_level - s->black_level[i]);
331 if (is_single_comp) {
338 uint16_t *dst_u16 = (uint16_t *)dst;
339 const uint16_t *src_u16 = (const uint16_t *)src;
342 for (col = 0; col < width; col++)
343 *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[col&1], scale_factor[col&1]);
346 dst += dst_stride * sizeof(uint16_t);
347 dst_u16 = (uint16_t *)dst;
350 for (col = 0; col < width; col++)
351 *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[(col&1) + 2], scale_factor[(col&1) + 2]);
353 dst += dst_stride * sizeof(uint16_t);
354 src += src_stride * sizeof(uint16_t);
361 uint16_t *dst_u16 = (uint16_t *)dst;
362 const uint16_t *src_u16 = (const uint16_t *)src;
364 for (col = 0; col < width; col++)
366 s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
367 scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);
369 dst += dst_stride * sizeof(uint16_t);
370 src += src_stride * sizeof(uint16_t);
374 uint8_t *dst_u8 = dst;
375 const uint8_t *src_u8 = src;
377 for (col = 0; col < width; col++)
379 s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
380 scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);
390 unsigned int bpp, uint8_t* dst,
391 int usePtr, const uint8_t *src,
396 while (--width >= 0) {
408 while (--width >= 0) {
416 while (--width >= 0) {
424 uint16_t *dst16 = (uint16_t *)dst;
426 uint8_t shift = is_dng ? 0 : 16 - bpp;
431 for (int i = 0; i < s->width; i++) {
450 if (!s->deinvert_buf)
459 const uint8_t *src, int lnum, int width, int bpp)
467 for (int i = 0; i < s->width; i++) {
473 const uint8_t *src, int lnum)
476 int w = (s->width - 1) / s->subsampling[0] + 1;
477 uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
478 uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
479 if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
480 for (i = 0; i < w; i++) {
481 for (j = 0; j < s->subsampling[1]; j++)
482 for (k = 0; k < s->subsampling[0]; k++)
484 FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
489 for (i = 0; i < w; i++) {
490 for (j = 0; j < s->subsampling[1]; j++)
491 for (k = 0; k < s->subsampling[0]; k++)
493 i * s->subsampling[0] + k] = *src++;
501 static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
504 z_stream zstream = { 0 };
507 zstream.next_in = src;
508 zstream.avail_in = size;
509 zstream.next_out = dst;
510 zstream.avail_out = *len;
511 zret = inflateInit(&zstream);
516 zret = inflate(&zstream, Z_SYNC_FLUSH);
517 inflateEnd(&zstream);
518 *len = zstream.total_out;
519 return zret == Z_STREAM_END ? Z_OK : zret;
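/* The helper above follows the usual one-shot zlib pattern: the whole input
 * and output are exposed through next_in/next_out, inflate() is called once,
 * and Z_STREAM_END is treated as success.  A standalone sketch of that
 * pattern (link with -lz); error handling is reduced to returning the zlib
 * code. */
#include <stdint.h>
#include <zlib.h>

static int inflate_once(uint8_t *dst, unsigned long *len,
                        const uint8_t *src, unsigned long size)
{
    z_stream zs = { 0 };
    int ret;

    zs.next_in   = (Bytef *)src;
    zs.avail_in  = size;
    zs.next_out  = dst;
    zs.avail_out = *len;
    if (inflateInit(&zs) != Z_OK)
        return Z_MEM_ERROR;
    ret = inflate(&zs, Z_SYNC_FLUSH);   /* all input is already available */
    inflateEnd(&zs);
    *len = zs.total_out;                /* bytes actually produced */
    return ret == Z_STREAM_END ? Z_OK : ret;
}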
524 int strip_start, int is_yuv)
527 unsigned long outlen;
529 outlen = width * lines;
538 src = s->deinvert_buf;
540 ret = tiff_uncompress(zbuf, &outlen, src, size);
543 "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
557 line += s->subsampling[1] - 1;
568 static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
571 lzma_stream stream = LZMA_STREAM_INIT;
574 stream.next_in = src;
575 stream.avail_in = size;
576 stream.next_out = dst;
577 stream.avail_out = *len;
578 ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
579 if (ret != LZMA_OK) {
583 ret = lzma_code(&stream, LZMA_RUN);
585 *len = stream.total_out;
586 return ret == LZMA_STREAM_END ? LZMA_OK : ret;
591 int strip_start, int is_yuv)
593 uint64_t outlen = width * (uint64_t)lines;
603 src = s->deinvert_buf;
605 ret = tiff_uncompress_lzma(buf, &outlen, src, size);
606 if (ret != LZMA_OK) {
608 "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
622 line += s->subsampling[1] - 1;
641 src = s->deinvert_buf;
644 s->compr, s->fax_opts);
654 int tile_byte_count, int dst_x, int dst_y, int w, int h)
657 uint8_t *dst_data, *src_data;
659 int is_single_comp, is_u16, pixel_size;
667 s->jpkt->data = (uint8_t*)s->gb.buffer;
668 s->jpkt->size = tile_byte_count;
674 mjpegdecctx->bayer = 1;
694 is_u16 = (s->bpp > 8);
698 if (s->jpgframe->width != s->avctx_mjpeg->width ||
699 s->jpgframe->height != s->avctx_mjpeg->height ||
700 s->jpgframe->format != s->avctx_mjpeg->pix_fmt)
704 if (s->avctx_mjpeg->width == w * 2 &&
705 s->avctx_mjpeg->height == h / 2 &&
708 } else if (s->avctx_mjpeg->width >= w &&
709 s->avctx_mjpeg->height >= h &&
716 pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
718 if (is_single_comp && !is_u16) {
724 dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
725 dst_data = frame->data[0] + dst_offset * pixel_size;
726 src_data = s->jpgframe->data[0];
730 frame->linesize[0] / pixel_size,
732 s->jpgframe->linesize[0] / pixel_size,
744 const uint8_t *src, int size, int strip_start, int lines)
748 const uint8_t *ssrc = src;
749 int width = ((s->width * s->bpp) + 7) >> 3;
753 desc->nb_components >= 3;
763 int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
764 s->subsampling[0] * s->subsampling[1] + 7) >> 3;
766 if (s->yuv_line == NULL) {
773 width = (s->width - 1) / s->subsampling[0] + 1;
784 if (s->yuv_line == NULL) {
795 strip_start, is_yuv);
798 "zlib support not enabled, "
799 "deflate compression not supported\n");
806 strip_start, is_yuv);
809 "LZMA support not enabled\n");
817 ssrc = src = s->deinvert_buf;
828 if (pixels < width) {
837 line += s->subsampling[1] - 1;
872 if (is_dng && stride == 0)
889 if (!s->fill_order) {
900 int is_u16, pixel_size_bytes, pixel_size_bits, elements;
902 is_u16 = (s->bpp / s->bppcount > 8);
903 pixel_size_bits = (is_u16 ? 16 : 8);
904 pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
906 elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount;
917 (line + strip_start)&1);
923 for (pixels = 0; pixels < width;) {
934 "Copy went out of bounds\n");
941 } else if (code != -128) {
945 "Run went out of bounds\n");
963 line += s->subsampling[1] - 1;
977 int tile_offset_offset, tile_offset;
978 int tile_byte_count_offset, tile_byte_count;
979 int tile_count_x, tile_count_y;
980 int tile_width, tile_length;
981 int has_width_leftover, has_height_leftover;
982 int tile_x = 0, tile_y = 0;
983 int pos_x = 0, pos_y = 0;
986 if (s->tile_width <= 0 || s->tile_length <= 0)
989 has_width_leftover = (s->width % s->tile_width != 0);
990 has_height_leftover = (s->height % s->tile_length != 0);
993 tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
994 tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;
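/* Worked example of the ceiling divisions above, assuming a hypothetical
 * 1000x600 image split into 256x256 tiles:
 *   tile_count_x = (1000 + 256 - 1) / 256 = 4
 *   tile_count_y = ( 600 + 256 - 1) / 256 = 3
 * The right and bottom edge tiles are smaller: 1000 % 256 = 232 columns and
 * 600 % 256 = 88 rows, which is what the has_*_leftover checks and the modulo
 * assignments below account for. */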
997 for (tile_idx = 0; tile_idx < tile_count_x * tile_count_y; tile_idx++) {
998 tile_x = tile_idx % tile_count_x;
999 tile_y = tile_idx / tile_count_x;
1001 if (has_width_leftover && tile_x == tile_count_x - 1)
1002 tile_width = s->width % s->tile_width;
1004 tile_width = s->tile_width;
1006 if (has_height_leftover && tile_y == tile_count_y - 1)
1007 tile_length = s->height % s->tile_length;
1009 tile_length = s->tile_length;
1012 tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
1017 tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
1031 pos_x += tile_width;
1032 if (tile_x == tile_count_x - 1) {
1034 pos_y += tile_length;
1048 int create_gray_palette = 0;
1051 if (s->bpp > 128 || s->bppcount >= 10) {
1053 "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1054 s->bpp, s->bppcount);
1058 switch (s->planar * 10000 + s->bpp * 10 + s->bppcount + s->is_bayer * 100000) {
1060 if (!s->palette_is_set) {
1067 if (!s->palette_is_set) {
1068 create_gray_palette = 1;
1122 if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1124 } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1126 } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1128 } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1130 } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1132 } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1158 "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1194 "This format is not supported (bpp=%d, bppcount=%d)\n",
1195 s->bpp, s->bppcount);
1203 desc->nb_components < 3) {
1209 if (s->width != s->avctx->width || s->height != s->avctx->height) {
1221 if (!create_gray_palette)
1222 memcpy(frame->data[1], s->palette, sizeof(s->palette));
1226 uint32_t *pal = (uint32_t *)frame->data[1];
1227 for (i = 0; i < 1<<s->bpp; i++)
1228 pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
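/* Worked example of the grayscale ramp above for bpp = 4 (16 palette entries):
 *   i = 0  ->  0 * 255 / 15 =   0  ->  0xFF000000 (opaque black)
 *   i = 5  ->  5 * 255 / 15 =  85  ->  0xFF555555
 *   i = 15 -> 15 * 255 / 15 = 255  ->  0xFFFFFFFF (opaque white)
 * Multiplying the 8-bit gray value by 0x010101 replicates it into the R, G
 * and B bytes of the palette entry, and 0xFFU << 24 sets full alpha. */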
1239 if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1240 uint64_t num = s->res[2] * (uint64_t)s->res[1];
1241 uint64_t den = s->res[0] * (uint64_t)s->res[3];
1242 if (num > INT64_MAX || den > INT64_MAX) {
1246 av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1247 num, den, INT32_MAX);
1248 if (!s->avctx->sample_aspect_ratio.den)
1249 s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
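/* A standalone sketch of the aspect-ratio computation above, assuming the
 * convention that res[0]/res[1] holds the XResolution rational and
 * res[2]/res[3] the YResolution rational: the two are cross-multiplied and
 * reduced with av_reduce().  The INT64_MAX overflow check from line 1242 is
 * omitted here.  Hypothetical input of 300/1 dpi horizontally and 150/1 dpi
 * vertically yields a 1:2 sample aspect ratio. */
#include <stdint.h>
#include <libavutil/rational.h>

static AVRational sar_from_resolution(const uint32_t res[4])
{
    AVRational sar;
    uint64_t num = res[2] * (uint64_t)res[1];   /* y_res.num * x_res.den */
    uint64_t den = res[0] * (uint64_t)res[3];   /* x_res.num * y_res.den */

    av_reduce(&sar.num, &sar.den, num, den, INT32_MAX);
    if (!sar.den)
        sar = (AVRational){ 0, 1 };
    return sar;
}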
1267 if (tag <= s->last_tag)
1302 s->is_thumbnail = (value != 0);
1305 if (value > INT_MAX)
1310 if (value > INT_MAX)
1315 if (count > 5 || count <= 0) {
1317 "This format is not supported (bpp=%d, %d components)\n",
1321 s->bppcount = count;
1332 for (i = 0; i < count; i++)
1343 "Samples per pixel requires a single value, many provided\n");
1348 "Invalid samples per pixel %d\n", value);
1351 if (s->bppcount == 1)
1401 if (value > INT_MAX) {
1403 "strippos %u too large\n", value);
1411 if (s->strips == s->bppcount)
1417 if (value > INT_MAX) {
1419 "stripsize %u too large\n", value);
1422 s->stripsizesoff = 0;
1426 s->stripsizesoff = off;
1436 s->tile_offsets_offset = off;
1440 s->tile_byte_counts_offset = off;
1443 if (value > INT_MAX)
1448 if (value > INT_MAX)
1453 if (value > INT_MAX)
1467 for (int i = 0; i < count; i++)
1469 s->white_level = s->dng_lut[count-1];
1475 for (int i = 0; i < count && count > 1; i++) {
1498 for (int i = count; i < 4 && count > 0; i++)
1499 s->black_level[i] = s->black_level[count - 1];
1537 "PhotometricInterpretation 0x%04X",
1542 "unknown\n", value);
1547 if (value < 1 || value > 2) {
1549 "Unknown FillOrder value %d, trying default one\n", value);
1552 s->fill_order = value - 1;
1557 if (count / 3 > 256 ||
1561 pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1571 for (i = 0; i < count / 3; i++) {
1572 uint32_t p = 0xFF000000;
1578 s->palette_is_set = 1;
1589 for (i = 0; i < count; i++) {
1591 if (s->subsampling[i] <= 0) {
1593 s->subsampling[i] = 1;
1600 if (value > INT_MAX)
1607 if (value > INT_MAX)
1612 #define ADD_METADATA(count, name, sep)\
1613 if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1614 av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1627 if (s->geotag_count) {
1634 if (s->geotag_count > count / 4 - 1) {
1635 s->geotag_count = count / 4 - 1;
1639 || s->geotag_count == 0) {
1640 s->geotag_count = 0;
1643 s->geotags = av_calloc(s->geotag_count, sizeof(*s->geotags));
1646 s->geotag_count = 0;
1649 for (i = 0; i < s->geotag_count; i++) {
1656 if (!s->geotags[i].type) {
1660 if (!s->geotags[i].val)
1663 s->geotags[i].offset = val;
1667 if (count >= INT_MAX / sizeof(int64_t))
1676 for (i = 0; i < count; i++)
1678 for (i = 0; i < s->geotag_count; i++) {
1680 if (s->geotags[i].count == 0
1681 || s->geotags[i].offset + s->geotags[i].count > count) {
1683 } else if (s->geotags[i].val) {
1686 char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1692 s->geotags[i].val = ap;
1700 for (i = 0; i < s->geotag_count; i++) {
1702 if (s->geotags[i].count == 0
1703 || s->geotags[i].offset + s->geotags[i].count > count) {
1711 if (s->geotags[i].val)
1719 ap[s->geotags[i].count - 1] = '\0';
1720 s->geotags[i].val = ap;
1772 bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1779 unsigned int ver[4];
1786 ver[0], ver[1], ver[2], ver[3]);
1795 for (int i = 0; i < 3; i++) {
1810 for (int i = 0; i < 3; i++) {
1825 for (int i = 0; i < 2; i++) {
1835 s->as_shot_white[2] = 1.f - s->as_shot_white[0] - s->as_shot_white[1];
1836 for (int i = 0; i < 3; i++) {
1842 for (int i = 0; i < 3; i++) {
1843 for (int j = 0; j < 3; j++) {
1852 s->use_color_matrix = 1;
1857 for (int i = 0; i < 3; i++) {
1858 for (int j = 0; j < 3; j++) {
1879 "Unknown or unsupported tag %d/0x%0X\n",
1885 if (s->bpp > 128U) {
1887 "This format is not supported (bpp=%d, %d components)\n",
1897 { 0.412453f, 0.357580f, 0.180423f },
1898 { 0.212671f, 0.715160f, 0.072169f },
1899 { 0.019334f, 0.119193f, 0.950227f },
1903 float rgb2cam[3][4],
1904 double cam2xyz[4][3])
1906 double cam2rgb[4][3], num;
1909 for (i = 0; i < 3; i++) {
1910 for (j = 0; j < 3; j++) {
1912 for (k = 0; k < 3; k++)
1913 cam2rgb[i][j] += cam2xyz[i][k] * xyz2rgb[k][j];
1917 for (i = 0; i < 3; i++) {
1918 for (num = j = 0; j < 3; j++)
1919 num += cam2rgb[i][j];
1922 for (j = 0; j < 3; j++)
1923 cam2rgb[i][j] /= num;
1924 s->premultiply[i] = 1.f / num;
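/* The loops above appear to follow the usual dcraw-style normalisation: each
 * row of the derived camera-to-RGB matrix is scaled so that it sums to 1, and
 * the inverse of the original row sum is kept as the per-channel multiplier.
 * A standalone sketch of that row normalisation (a zero row sum is simply
 * skipped here): */
static void normalize_rows(double cam2rgb[3][3], float premultiply[3])
{
    for (int i = 0; i < 3; i++) {
        double sum = 0.0;
        for (int j = 0; j < 3; j++)
            sum += cam2rgb[i][j];
        if (sum == 0.0)
            continue;                       /* leave the row and multiplier untouched */
        for (int j = 0; j < 3; j++)
            cam2rgb[i][j] /= sum;           /* row now sums to 1 */
        premultiply[i] = 1.f / (float)sum;  /* channel gain from the row sum */
    }
}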
1932 unsigned off, last_off = 0;
1935 unsigned soff, ssize;
1939 int retry_for_subifd, retry_for_page;
1941 int has_tile_bits, has_strip_bits;
1955 } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1963 s->use_color_matrix = 0;
1965 s->is_thumbnail = 0;
1966 s->bppcount = s->bpp = 1;
1977 for (i = 0; i < 65536; i++)
1981 s->black_level[i] = 0.f;
1984 s->as_shot_neutral[i] = 0.f;
1987 s->as_shot_white[i] = 1.f;
1990 s->analog_balance[i] = 1.f;
1993 s->premultiply[i] = 1.f;
1996 for (j = 0; j < 4; j++)
1997 s->camera_calibration[i][j] = i == j;
2002 s->stripsizesoff = s->strippos = 0;
2008 for (i = 0; i < entries; i++) {
2013 if (s->get_thumbnail && !s->is_thumbnail) {
2019 retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
2021 retry_for_page = s->get_page && s->cur_page + 1 < s->get_page;
2023 if (retry_for_page) {
2026 } else if (retry_for_subifd) {
2031 if (retry_for_subifd || retry_for_page) {
2036 if (off <= last_off) {
2041 if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
2053 for (i = 0; i < s->geotag_count; i++) {
2064 s->geotags[i].val = NULL;
2072 double cam2xyz[4][3];
2073 float cmatrix[3][4];
2074 float pmin = FLT_MAX;
2077 for (i = 0; i < 3; i++) {
2078 for (j = 0; j < 3; j++)
2079 s->camera_calibration[i][j] *= s->analog_balance[i];
2082 if (!s->use_color_matrix) {
2083 for (i = 0; i < 3; i++) {
2084 if (s->camera_calibration[i][i])
2085 s->premultiply[i] /= s->camera_calibration[i][i];
2088 for (int c = 0; c < 3; c++) {
2089 for (i = 0; i < 3; i++) {
2091 for (j = 0; j < 3; j++)
2092 cam2xyz[c][i] += s->camera_calibration[c][j] * s->color_matrix[j][i] * s->as_shot_white[i];
2099 for (int c = 0; c < 3; c++)
2100 pmin = fminf(pmin, s->premultiply[c]);
2102 for (int c = 0; c < 3; c++)
2103 s->premultiply[c] /= pmin;
2105 if (s->bpp % s->bppcount)
2107 bps = s->bpp / s->bppcount;
2108 if (bps < 8 || bps > 32)
2111 if (s->white_level == 0)
2112 s->white_level = (1LL << bps) - 1;
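/* Worked example of the fallback above: when no white level was signalled,
 * it defaults to the maximum code value of the detected bit depth, e.g.
 *   bps = 12 -> (1LL << 12) - 1 = 4095
 *   bps = 16 -> (1LL << 16) - 1 = 65535 */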
2114 if (s->white_level <= s->black_level[0]) {
2115 av_log(avctx, AV_LOG_ERROR, "BlackLevel (%g) must be less than WhiteLevel (%"PRId32")\n",
2116 s->black_level[0], s->white_level);
2124 if (!s->is_tiled && !s->strippos && !s->stripoff) {
2129 has_tile_bits = s->is_tiled || s->tile_byte_counts_offset || s->tile_offsets_offset || s->tile_width || s->tile_length;
2130 has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff;
2132 if (has_tile_bits && has_strip_bits) {
2133 int tiled_dng = s->is_tiled && is_dng;
2143 if (!s->is_tiled || has_strip_bits) {
2144 if (s->strips == 1 && !s->stripsize) {
2146 s->stripsize = avpkt->size - s->stripoff;
2149 if (s->stripsizesoff) {
2150 if (s->stripsizesoff >= (unsigned)avpkt->size)
2153 avpkt->size - s->stripsizesoff);
2156 if (s->strippos >= (unsigned)avpkt->size)
2159 avpkt->size - s->strippos);
2162 if (s->rps <= 0 || s->rps % s->subsampling[1]) {
2177 if (is_dng && s->is_tiled) {
2181 } else if (!s->is_bayer) {
2193 planes = s->planar ? s->bppcount : 1;
2194 for (plane = 0; plane < planes; plane++) {
2195 uint8_t *five_planes = NULL;
2196 int remaining = avpkt->size;
2208 for (i = 0; i < s->height; i += s->rps) {
2211 if (s->stripsizesoff)
2212 ssize = ff_tget(&stripsizes, s->sstype, le);
2214 ssize = s->stripsize;
2217 soff = ff_tget(&stripdata, s->sot, le);
2221 if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
2228 FFMIN(s->rps, s->height - i))) < 0) {
2236 decoded_height = FFMIN(i, s->height);
2238 if (s->predictor == 2) {
2243 dst = five_planes ? five_planes : p->data[plane];
2246 soff = FFMAX(soff / s->bppcount, 1);
2247 ssize = s->width * soff;
2254 for (i = 0; i < decoded_height; i++) {
2255 for (j = soff; j < ssize; j += 2)
2265 for (i = 0; i < decoded_height; i++) {
2266 for (j = soff; j < ssize; j += 2)
2271 for (i = 0; i < decoded_height; i++) {
2272 for (j = soff; j < ssize; j++)
2281 if (s->predictor == 3) {
2287 dst = five_planes ? five_planes : p->data[plane];
2290 soff = FFMAX(soff / s->bppcount, 1);
2293 ssize = s->width * soff;
2294 bpc = FFMAX(soff / s->bppcount, 1);
2305 for (i = 0; i < decoded_height; i++) {
2319 for (j = 0; j < group_size; j++) {
2320 for (int k = 0; k < bpc; k++) {
2321 dst[bpc * j + k] = tmpbuf[(bpc - k - 1) * group_size + j];
2329 for (i = 0; i < decoded_height; i++) {
2336 for (j = 0; j < group_size; j++) {
2337 for (int k = 0; k < bpc; k++) {
2338 dst[bpc * j + k] = tmpbuf[k * group_size + j];
2352 for (i = 0; i < s->height; i++) {
2353 for (j = 0; j < stride; j++)
2362 uint8_t *src = five_planes ? five_planes : p->data[plane];
2364 for (i = 0; i < s->height; i++) {
2365 for (j = 0; j < s->width; j++) {
2366 int k = 255 - src[x * j + 3];
2367 int r = (255 - src[x * j ]) * k;
2368 int g = (255 - src[x * j + 1]) * k;
2369 int b = (255 - src[x * j + 2]) * k;
2370 dst[4 * j ] = r * 257 >> 16;
2371 dst[4 * j + 1] = g * 257 >> 16;
2372 dst[4 * j + 2] = b * 257 >> 16;
2382 for (i = 0; i < s->height; i++) {
2383 for (j = 0; j < s->width; j++) {
2384 uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2386 uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2387 uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2398 if (s->planar && s->bppcount > 2) {
2405 if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
2406 uint16_t *dst = (uint16_t *)p->data[0];
2407 for (i = 0; i < s->height; i++) {
2408 for (j = 0; j < s->width; j++)
2409 dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
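/* Worked example of the rescale above, assuming 12-bit Bayer data stored in a
 * 16-bit container: with white_level = 4095 a stored value of 4095 maps to
 * 65535 and a value of 2048 maps to roughly half of full scale, so the raw
 * samples fill the full 16-bit range expected by the 16-bit Bayer pixel
 * formats. */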
2431 s->subsampling[1] = 1;
2441 if (!s->jpgframe || !s->jpkt)
2447 if (!s->avctx_mjpeg)
2449 s->avctx_mjpeg->flags = avctx->flags;
2450 s->avctx_mjpeg->flags2 = avctx->flags2;
2451 s->avctx_mjpeg->idct_algo = avctx->idct_algo;
2470 s->deinvert_buf_size = 0;
2472 s->yuv_line_size = 0;
2479 #define OFFSET(x) offsetof(TiffContext, x)
enum AVColorTransferCharacteristic color_trc
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
int ff_tadd_string_metadata(int count, const char *name, GetByteContext *gb, int le, AVDictionary **metadata)
Adds a string of count characters into the metadata dictionary.
#define AV_LOG_WARNING
Something somehow does not look correct.
@ TIFF_GEOG_LINEAR_UNITS_GEOKEY
const FFCodec ff_tiff_decoder
#define AV_EF_EXPLODE
abort decoding on minor error detection
static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
static int get_geokey_type(int key)
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
int av_exif_parse_buffer(void *logctx, const uint8_t *buf, size_t size, AVExifMetadata *ifd, enum AVExifHeaderMode header_mode)
Decodes the EXIF data provided in the buffer and writes it into the struct *ifd.
static const ElemCat * elements[ELEMENT_COUNT]
@ TIFF_PHOTOMETRIC_ICC_LAB
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
static av_always_inline int bytestream2_tell(const GetByteContext *g)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define AVERROR_EOF
End of file.
static void camera_xyz_coeff(TiffContext *s, float rgb2cam[3][4], double cam2xyz[4][3])
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
@ TIFF_PROJ_COORD_TRANS_GEOKEY
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
static char * doubles2str(double *dp, int count, const char *sep)
char * av_asprintf(const char *fmt,...)
static const TiffGeoTagKeyName tiff_projection_codes[]
@ TIFF_GEOG_AZIMUTH_UNITS_GEOKEY
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
This structure describes decoded (raw) audio or video data.
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
static av_cold int tiff_end(AVCodecContext *avctx)
@ AV_PIX_FMT_GBRAPF32LE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian.
const FFCodec ff_mjpeg_decoder
@ AV_PIX_FMT_GBRPF32BE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
@ TIFF_PHOTOMETRIC_ITU_LAB
const uint8_t ff_reverse[256]
#define RET_GEOKEY_VAL(TYPE, array)
static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
int ff_lzw_decode(LZWState *p, uint8_t *buf, int len)
Decode given number of bytes NOTE: the algorithm here is inspired from the LZW GIF decoder written by...
@ TIFF_GEOG_ELLIPSOID_GEOKEY
#define TIFF_GEO_KEY_USER_DEFINED
@ TIFF_PROJ_LINEAR_UNITS_GEOKEY
av_cold void ff_lzw_decode_close(LZWState **p)
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
#define AV_PIX_FMT_BAYER_GRBG16
@ TIFF_PHOTOMETRIC_WHITE_IS_ZERO
@ TIFF_GEOG_PRIME_MERIDIAN_GEOKEY
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static uint16_t av_always_inline dng_process_color16(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
#define TIFF_GEO_KEY_UNDEFINED
static const AVOption tiff_options[]
@ TIFF_PHOTOMETRIC_LINEAR_RAW
@ TIFF_PHOTOMETRIC_ALPHA_MASK
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
static av_cold void close(AVCodecParserContext *s)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
int ff_ccitt_unpack(AVCodecContext *avctx, const uint8_t *src, int srcsize, uint8_t *dst, int height, int stride, enum TiffCompr compr, int opts)
unpack data compressed with CCITT Group 3 1/2-D or Group 4 method
AVCodec p
The public AVCodec.
static void unpack_yuv(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum)
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
static void tiff_set_type(TiffContext *s, enum TiffType tiff_type)
static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, const AVPacket *avpkt)
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
@ TIFF_GEOG_GEODETIC_DATUM_GEOKEY
void av_exif_free(AVExifMetadata *ifd)
Frees all resources associated with the given EXIF metadata struct.
int flags
AV_CODEC_FLAG_*.
static double val(void *priv, double ch)
static void av_always_inline horizontal_fill(TiffContext *s, unsigned int bpp, uint8_t *dst, int usePtr, const uint8_t *src, uint8_t c, int width, int offset)
@ TIFF_VERTICAL_CS_TYPE_GEOKEY
#define AV_PIX_FMT_GRAY16
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
@ DNG_LINEARIZATION_TABLE
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static const char * get_geokey_val(int key, uint16_t val)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
@ TIFF_GRAY_RESPONSE_CURVE
#define FF_ARRAY_ELEMS(a)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
float camera_calibration[4][4]
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
#define FF_CODEC_DECODE_CB(func)
static const TiffGeoTagKeyName tiff_proj_cs_type_codes[]
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
@ AV_PIX_FMT_BAYER_BGGR8
bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
TiffType
TIFF types in ascending priority (last in the list is highest)
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
av_cold void ff_lzw_decode_open(LZWState **p)
float fminf(float, float)
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
@ TIFF_GEOGRAPHIC_TYPE_GEOKEY
static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame, int tile_byte_count, int dst_x, int dst_y, int w, int h)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
int ff_tadd_shorts_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, int is_signed, AVDictionary **metadata)
Adds count shorts converted to a string into the metadata dictionary.
int64_t max_pixels
The number of pixels per image to maximally accept.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define CODEC_LONG_NAME(str)
enum TiffPhotometric photometric
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
static const char * search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
@ AV_PIX_FMT_BAYER_RGGB8
bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
#define AV_PIX_FMT_BAYER_BGGR16
static uint16_t av_always_inline dng_process_color8(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
av_cold void ff_ccitt_unpack_init(void)
initialize unpacker code
@ TIFF_VERTICAL_UNITS_GEOKEY
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
@ AVDISCARD_ALL
discard all
static av_cold int tiff_init(AVCodecContext *avctx)
#define LIBAVUTIL_VERSION_INT
unsigned ff_tget_short(GetByteContext *gb, int le)
Reads a short from the bytestream using given endianness.
Describe the class of an AVClass context structure.
@ AV_PIX_FMT_GBRAPF32BE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Rational number (pair of numerator and denominator).
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
@ AV_PIX_FMT_YA16LE
16 bits gray, 16 bits alpha (little-endian)
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
@ TIFF_PHOTOMETRIC_PALETTE
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PIX_FMT_RGBA64LE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
@ AV_PICTURE_TYPE_I
Intra.
@ TIFF_PHOTOMETRIC_CIE_LAB
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
#define AV_PIX_FMT_BAYER_GBRG16
#define RET_GEOKEY_TYPE(TYPE, array)
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
int ff_tread_tag(GetByteContext *gb, int le, unsigned *tag, unsigned *type, unsigned *count, int *next)
Reads the first 3 fields of a TIFF tag, which are the tag id, the tag type and the count of values fo...
@ AV_PIX_FMT_RGBF32BE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., big-endian.
int flags2
AV_CODEC_FLAG2_*.
int(* init)(AVBSFContext *ctx)
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
@ TIFF_TYPE_CINEMADNG
Digital Negative (DNG) image part of an CinemaDNG image sequence.
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
static int shift(int a, int b)
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
@ DNG_CAMERA_CALIBRATION1
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
static const float xyz2rgb[3][3]
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
@ TIFF_GT_MODEL_TYPE_GEOKEY
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
@ TIFF_MODEL_TRANSFORMATION
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
static int cmp_id_key(const void *id, const void *k)
#define AV_LOG_INFO
Standard information.
static const AVClass tiff_decoder_class
static const struct @513 planes[]
@ TIFF_PHOTOMETRIC_LOG_LUV
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
static int decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
#define i(width, name, range_min, range_max)
unsigned ff_tget_long(GetByteContext *gb, int le)
Reads a long from the bytestream using given endianness.
@ TIFF_PHOTOMETRIC_BLACK_IS_ZERO
int ff_lzw_decode_init(LZWState *p, int csize, const uint8_t *buf, int buf_size, int mode)
Initialize LZW decoder.
static void unpack_gray(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum, int width, int bpp)
#define av_malloc_array(a, b)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
@ AV_PIX_FMT_BAYER_GBRG8
bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
@ TIFF_TYPE_TIFF
TIFF image based on the TIFF 6.0 or TIFF/EP (ISO 12234-2) specifications.
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
#define AV_OPT_FLAG_VIDEO_PARAM
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
int ff_exif_attach_ifd(void *logctx, AVFrame *frame, const AVExifMetadata *ifd)
Attach an already-parsed EXIF metadata struct to the frame as a side data buffer.
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
@ TIFF_TYPE_DNG
Digital Negative (DNG) image.
const char * name
Name of the codec implementation.
@ AV_PIX_FMT_GBRPF32LE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
void * av_calloc(size_t nmemb, size_t size)
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
#define FFSWAP(type, a, b)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
static const float d65_white[3]
static const char * get_geokey_name(int key)
double ff_tget_double(GetByteContext *gb, int le)
Reads a double from the bytestream using given endianness.
TiffPhotometric
list of TIFF, TIFF/EP and DNG PhotometricInterpretation (TIFF_PHOTOMETRIC) values
main external API structure.
#define ADD_METADATA(count, name, sep)
@ AV_PIX_FMT_RGBAF32BE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., big-endian.
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
@ AV_OPT_TYPE_INT
Underlying C type is int.
AVDictionary * metadata
metadata.
static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride, const uint8_t *src, int size, int width, int lines)
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
@ AV_PIX_FMT_RGBF32LE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., little-endian.
#define RET_GEOKEY_STR(TYPE, array)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
@ DNG_CAMERA_CALIBRATION2
char * av_strdup(const char *s)
Duplicate a string.
#define AV_OPT_FLAG_DECODING_PARAM
A generic parameter which can be set by the user for demuxing or decoding.
@ AV_PIX_FMT_RGBAF32LE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., little-endian.
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
static int init_image(TiffContext *s, AVFrame *frame)
#define avpriv_request_sample(...)
Structure to hold side data for an AVFrame.
static void free_geotags(TiffContext *const s)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
This structure stores compressed data.
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int width, int height, int is_single_comp, int is_u16, int odd_line)
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride, const uint8_t *src, int size, int strip_start, int lines)
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
int tile_byte_counts_offset
int ff_tadd_doubles_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, AVDictionary **metadata)
Adds count doubles converted to a string into the metadata dictionary.
AVCodecContext * avctx_mjpeg
static int add_metadata(int count, int type, const char *name, const char *sep, TiffContext *s, AVFrame *frame)
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
TiffCompr
list of TIFF, TIFF/EP and DNG compression types
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
@ TIFF_GEOG_ANGULAR_UNITS_GEOKEY
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static const uint8_t type_sizes[14]
sizes of various TIFF field types (string size = 100)
#define AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_BAYER_RGGB16
unsigned ff_tget(GetByteContext *gb, int type, int le)
Reads a byte from the bytestream using given endianness.
@ TIFF_PHOTOMETRIC_SEPARATED
@ TIFF_PROJECTED_CS_TYPE_GEOKEY
@ AV_PIX_FMT_BAYER_GRBG8
bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
unsigned int yuv_line_size
@ TIFF_GT_RASTER_TYPE_GEOKEY