#include "config_components.h"
static int sse4_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                  ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}
static int sse8_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                  ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}
static int sse16_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                   ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0]  - pix2[0]];
        s += sq[pix1[1]  - pix2[1]];
        s += sq[pix1[2]  - pix2[2]];
        s += sq[pix1[3]  - pix2[3]];
        s += sq[pix1[4]  - pix2[4]];
        s += sq[pix1[5]  - pix2[5]];
        s += sq[pix1[6]  - pix2[6]];
        s += sq[pix1[7]  - pix2[7]];
        s += sq[pix1[8]  - pix2[8]];
        s += sq[pix1[9]  - pix2[9]];
        s += sq[pix1[10] - pix2[10]];
        s += sq[pix1[11] - pix2[11]];
        s += sq[pix1[12] - pix2[12]];
        s += sq[pix1[13] - pix2[13]];
        s += sq[pix1[14] - pix2[14]];
        s += sq[pix1[15] - pix2[15]];
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}
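/* Standalone sketch (illustrative, not part of the original file): why
 * `sq = ff_square_tab + 256` works. The table holds (i - 256)^2 for
 * i in [0, 511], so indexing the centred pointer with a pixel difference
 * in [-255, 255] yields its square without a branch or multiply. */
#include <stdint.h>

static uint32_t square_tab[512];

static void init_square_tab(void)
{
    for (int i = 0; i < 512; i++)
        square_tab[i] = (i - 256) * (i - 256);
}

static inline uint32_t squared_diff(uint8_t a, uint8_t b)
{
    const uint32_t *sq = square_tab + 256;
    return sq[a - b]; /* a - b is promoted to int, range [-255, 255] */
}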
static int sum_abs_dctelem_c(const int16_t *block)
{
    int sum = 0, i;

    for (i = 0; i < 64; i++)
        sum += FFABS(block[i]);

    return sum;
}
#define avg2(a, b)       (((a) + (b) + 1) >> 1)
#define avg4(a, b, c, d) (((a) + (b) + (c) + (d) + 2) >> 2)
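/* Illustrative usage (standalone sketch, not part of the original file):
 * both macros round to nearest with halves rounding up, which is what the
 * +1 / +2 bias terms provide before the shift. */
#include <stdio.h>

int main(void)
{
    printf("avg2(10, 11)     = %d\n", avg2(10, 11));      /* (10+11+1)>>1 = 11 */
    printf("avg4(1, 2, 3, 4) = %d\n", avg4(1, 2, 3, 4));  /* (1+2+3+4+2)>>2 = 3 */
    return 0;
}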
static int pix_abs16_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                       ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0]  - pix2[0]);
        s += abs(pix1[1]  - pix2[1]);
        s += abs(pix1[2]  - pix2[2]);
        s += abs(pix1[3]  - pix2[3]);
        s += abs(pix1[4]  - pix2[4]);
        s += abs(pix1[5]  - pix2[5]);
        s += abs(pix1[6]  - pix2[6]);
        s += abs(pix1[7]  - pix2[7]);
        s += abs(pix1[8]  - pix2[8]);
        s += abs(pix1[9]  - pix2[9]);
        s += abs(pix1[10] - pix2[10]);
        s += abs(pix1[11] - pix2[11]);
        s += abs(pix1[12] - pix2[12]);
        s += abs(pix1[13] - pix2[13]);
        s += abs(pix1[14] - pix2[14]);
        s += abs(pix1[15] - pix2[15]);
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}
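/* Standalone sketch (illustrative, not part of the original file): the
 * unrolled pix_abs16_c above is equivalent to this generic reference SAD
 * with w == 16; the unrolling just keeps the hot loop branch-light. */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

static int sad_ref(const uint8_t *pix1, const uint8_t *pix2,
                   ptrdiff_t stride, int w, int h)
{
    int s = 0;

    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++)
            s += abs(pix1[x] - pix2[x]);
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}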
#define V(x) (pix1[x] - pix2[x])
    /* pix_median_abs16_c, main loop: rows after the first predict each
     * difference V(j) from its left, top and top-left neighbours */
    for (i = 1; i < h; i++) {
        s += abs(V(0) - V(-stride));
        for (j = 1; j < 16; j++)
            s += abs(V(j) - mid_pred(V(j - stride), V(j - 1),
                                     V(j - stride) + V(j - 1) - V(j - stride - 1)));
        pix1 += stride;
        pix2 += stride;
    }
static int pix_abs16_x2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                          ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0]  - avg2(pix2[0],  pix2[1]));
        s += abs(pix1[1]  - avg2(pix2[1],  pix2[2]));
        s += abs(pix1[2]  - avg2(pix2[2],  pix2[3]));
        s += abs(pix1[3]  - avg2(pix2[3],  pix2[4]));
        s += abs(pix1[4]  - avg2(pix2[4],  pix2[5]));
        s += abs(pix1[5]  - avg2(pix2[5],  pix2[6]));
        s += abs(pix1[6]  - avg2(pix2[6],  pix2[7]));
        s += abs(pix1[7]  - avg2(pix2[7],  pix2[8]));
        s += abs(pix1[8]  - avg2(pix2[8],  pix2[9]));
        s += abs(pix1[9]  - avg2(pix2[9],  pix2[10]));
        s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
        s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
        s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
        s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
        s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
        s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}
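/* Standalone sketch (illustrative, not part of the original file):
 * pix_abs16_x2_c scores pix1 against the reference shifted by half a pixel
 * horizontally, i.e. each reference sample is the rounded average of two
 * horizontal neighbours. A generic version of the same idea: */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

static int sad_halfpel_x_ref(const uint8_t *cur, const uint8_t *ref,
                             ptrdiff_t stride, int w, int h)
{
    int s = 0;

    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++)
            s += abs(cur[x] - ((ref[x] + ref[x + 1] + 1) >> 1));
        cur += stride;
        ref += stride;
    }
    return s;
}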
static int pix_abs16_y2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                          ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0]  - avg2(pix2[0],  pix3[0]));
        s += abs(pix1[1]  - avg2(pix2[1],  pix3[1]));
        s += abs(pix1[2]  - avg2(pix2[2],  pix3[2]));
        s += abs(pix1[3]  - avg2(pix2[3],  pix3[3]));
        s += abs(pix1[4]  - avg2(pix2[4],  pix3[4]));
        s += abs(pix1[5]  - avg2(pix2[5],  pix3[5]));
        s += abs(pix1[6]  - avg2(pix2[6],  pix3[6]));
        s += abs(pix1[7]  - avg2(pix2[7],  pix3[7]));
        s += abs(pix1[8]  - avg2(pix2[8],  pix3[8]));
        s += abs(pix1[9]  - avg2(pix2[9],  pix3[9]));
        s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
        s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
        s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
        s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
        s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
        s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}
static int pix_abs16_xy2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                           ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0]  - avg4(pix2[0],  pix2[1],  pix3[0],  pix3[1]));
        s += abs(pix1[1]  - avg4(pix2[1],  pix2[2],  pix3[1],  pix3[2]));
        s += abs(pix1[2]  - avg4(pix2[2],  pix2[3],  pix3[2],  pix3[3]));
        s += abs(pix1[3]  - avg4(pix2[3],  pix2[4],  pix3[3],  pix3[4]));
        s += abs(pix1[4]  - avg4(pix2[4],  pix2[5],  pix3[4],  pix3[5]));
        s += abs(pix1[5]  - avg4(pix2[5],  pix2[6],  pix3[5],  pix3[6]));
        s += abs(pix1[6]  - avg4(pix2[6],  pix2[7],  pix3[6],  pix3[7]));
        s += abs(pix1[7]  - avg4(pix2[7],  pix2[8],  pix3[7],  pix3[8]));
        s += abs(pix1[8]  - avg4(pix2[8],  pix2[9],  pix3[8],  pix3[9]));
        s += abs(pix1[9]  - avg4(pix2[9],  pix2[10], pix3[9],  pix3[10]));
        s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
        s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
        s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
        s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
        s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
        s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}
static int pix_abs8_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                      ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - pix2[0]);
        s += abs(pix1[1] - pix2[1]);
        s += abs(pix1[2] - pix2[2]);
        s += abs(pix1[3] - pix2[3]);
        s += abs(pix1[4] - pix2[4]);
        s += abs(pix1[5] - pix2[5]);
        s += abs(pix1[6] - pix2[6]);
        s += abs(pix1[7] - pix2[7]);
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}
#define V(x) (pix1[x] - pix2[x])
    /* pix_median_abs8_c, main loop: same median prediction as the 16-wide
     * variant, over 8 columns */
    for (i = 1; i < h; i++) {
        s += abs(V(0) - V(-stride));
        for (j = 1; j < 8; j++)
            s += abs(V(j) - mid_pred(V(j - stride), V(j - 1),
                                     V(j - stride) + V(j - 1) - V(j - stride - 1)));
        pix1 += stride;
        pix2 += stride;
    }
static int pix_abs8_x2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                         ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
        s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
        s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
        s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
        s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
        s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
        s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
        s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}
static int pix_abs8_y2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                         ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
        s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
        s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
        s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
        s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
        s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
        s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
        s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}
static int pix_abs8_xy2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                          ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
        s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
        s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
        s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
        s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
        s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
        s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
        s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}
static int nsse16_c(MPVEncContext *const c, const uint8_t *s1, const uint8_t *s2,
                    ptrdiff_t stride, int h)
{
    int score1 = 0, score2 = 0, x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
        if (y + 1 < h) {
            for (x = 0; x < 15; x++)
                score2 += FFABS(s1[x]     - s1[x + stride] -
                                s1[x + 1] + s1[x + stride + 1]) -
                          FFABS(s2[x]     - s2[x + stride] -
                                s2[x + 1] + s2[x + stride + 1]);
        }
        s1 += stride;
        s2 += stride;
    }

    if (c)
        return score1 + FFABS(score2) * c->c.avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}
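/* Standalone sketch (illustrative, not part of the original file): NSSE is
 * SSE plus a weighted penalty on the mismatch of local gradients ("noise"),
 * so a candidate that smears texture is punished even when its plain SSE is
 * competitive. `weight` below stands in for avctx->nsse_weight. */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

static int nsse_ref(const uint8_t *s1, const uint8_t *s2,
                    ptrdiff_t stride, int w, int h, int weight)
{
    int sse = 0, noise = 0;

    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++)
            sse += (s1[x] - s2[x]) * (s1[x] - s2[x]);
        if (y + 1 < h) {
            for (int x = 0; x + 1 < w; x++)
                noise += abs(s1[x] - s1[x + stride] - s1[x + 1] + s1[x + stride + 1]) -
                         abs(s2[x] - s2[x + stride] - s2[x + 1] + s2[x + stride + 1]);
        }
        s1 += stride;
        s2 += stride;
    }
    return sse + abs(noise) * weight;
}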
static int nsse8_c(MPVEncContext *const c, const uint8_t *s1, const uint8_t *s2,
                   ptrdiff_t stride, int h)
{
    int score1 = 0, score2 = 0, x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
        if (y + 1 < h) {
            for (x = 0; x < 7; x++)
                score2 += FFABS(s1[x]     - s1[x + stride] -
                                s1[x + 1] + s1[x + stride + 1]) -
                          FFABS(s2[x]     - s2[x + stride] -
                                s2[x + 1] + s2[x + stride + 1]);
        }
        s1 += stride;
        s2 += stride;
    }

    if (c)
        return score1 + FFABS(score2) * c->c.avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}
#define ENTRY(CMP_FLAG, ARRAY, MPVENC_ONLY)        \
    [FF_CMP_ ## CMP_FLAG] = {                      \
        .offset   = offsetof(MECmpContext, ARRAY), \
        .mpv_only = MPVENC_ONLY,                   \
    }
static const struct {
    uint16_t offset;
    char mpv_only;
} cmp_func_list[] = {
    ENTRY(SAD,        sad,            0),
    ENTRY(SSE,        sse,            0),
    ENTRY(SATD,       hadamard8_diff, 0),
    ENTRY(DCT,        dct_sad,        1),
    ENTRY(PSNR,       quant_psnr,     1),
    ENTRY(BIT,        bit,            1),
    ENTRY(RD,         rd,             1),
    ENTRY(VSAD,       vsad,           0),
    ENTRY(VSSE,       vsse,           0),
    ENTRY(NSSE,       nsse,           0),
#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
    ENTRY(W53,        w53,            0),
    ENTRY(W97,        w97,            0),
#endif
    ENTRY(DCTMAX,     dct_max,        1),
#if CONFIG_GPL
    ENTRY(DCT264,     dct264_sad,     1),
#endif
    ENTRY(MEDIAN_SAD, median_sad,     0),
};
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
{
    const me_cmp_func *me_cmp_func_array;

    type &= 0xFF;

    if (type == FF_CMP_ZERO) {
        for (int i = 0; i < 6; i++)
            cmp[i] = zero_cmp;
        return 0;
    }
    if (type >= FF_ARRAY_ELEMS(cmp_func_list) ||
        !cmp_func_list[type].offset ||
        (!mpvenc && cmp_func_list[type].mpv_only)) {
        av_log(NULL, AV_LOG_ERROR,
               "invalid cmp function selection\n");
        return AVERROR(EINVAL);
    }
    me_cmp_func_array = (const me_cmp_func*)(((const char*)c) + cmp_func_list[type].offset);
    for (int i = 0; i < 6; i++)
        cmp[i] = me_cmp_func_array[i];

    return 0;
}
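/* Illustrative sketch (not part of the original file) of the lookup trick
 * above: the table stores byte offsets into the context struct, so one
 * generic function can return any per-metric array without a switch.
 * `struct ctx`, `offsets` and `get_array` are hypothetical names. */
#include <stddef.h>

struct ctx { int sad[6]; int sse[6]; };

static const size_t offsets[] = { offsetof(struct ctx, sad),
                                  offsetof(struct ctx, sse) };

static const int *get_array(const struct ctx *c, int type)
{
    return (const int *)((const char *)c + offsets[type]);
}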
#define BUTTERFLY2(o1, o2, i1, i2) \
    o1 = (i1) + (i2);              \
    o2 = (i1) - (i2);

#define BUTTERFLY1(x, y)                            \
    { int a = (x), b = (y); x = a + b; y = a - b; }

#define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
static int hadamard8_diff8x8_c(MPVEncContext *unused, const uint8_t *dst,
                               const uint8_t *src, ptrdiff_t stride, int h)
{
    int i, temp[64], sum = 0;

    for (i = 0; i < 8; i++) {
        /* ... horizontal butterflies over the row differences src - dst ... */
    }

    for (i = 0; i < 8; i++) {
        /* ... vertical butterflies, BUTTERFLYA accumulation into sum ... */
    }

    return sum;
}
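/* Standalone sketch (illustrative, not part of the original file): an
 * 8-point Hadamard transform is three butterfly stages; the SATD of a block
 * is the sum of absolute values of these coefficients, i.e. a cheap
 * frequency-domain SAD. `hadamard8_1d_ref` is a hypothetical name. */
static void hadamard8_1d_ref(int v[8])
{
    for (int span = 1; span < 8; span <<= 1)   /* stages with span 1, 2, 4 */
        for (int i = 0; i < 8; i += span << 1)
            for (int j = i; j < i + span; j++) {
                int a = v[j], b = v[j + span];
                v[j]        = a + b;
                v[j + span] = a - b;
            }
}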
static int hadamard8_intra8x8_c(MPVEncContext *unused, const uint8_t *src,
                                const uint8_t *dummy, ptrdiff_t stride, int h)
{
    int i, temp[64], sum = 0;

    for (i = 0; i < 8; i++) {
        /* ... horizontal butterflies directly on the source rows ... */
    }

    for (i = 0; i < 8; i++) {
        /* ... vertical butterflies, BUTTERFLYA accumulation into sum ... */
    }

    /* drop the DC term so the metric is usable for intra decisions */
    sum -= FFABS(temp[8 * 0]);

    return sum;
}
    /* tail of dct_sad8x8_c: the block difference has been forward-DCT'd into
     * temp; the metric is the sum of absolute DCT coefficients */
    return s->sum_abs_dctelem(temp);
#define DCT8_1D                                         \
    {                                                   \
        const int s07 = SRC(0) + SRC(7);                \
        const int s16 = SRC(1) + SRC(6);                \
        const int s25 = SRC(2) + SRC(5);                \
        const int s34 = SRC(3) + SRC(4);                \
        const int a0  = s07 + s34;                      \
        const int a1  = s16 + s25;                      \
        const int a2  = s07 - s34;                      \
        const int a3  = s16 - s25;                      \
        const int d07 = SRC(0) - SRC(7);                \
        const int d16 = SRC(1) - SRC(6);                \
        const int d25 = SRC(2) - SRC(5);                \
        const int d34 = SRC(3) - SRC(4);                \
        const int a4  = d16 + d25 + (d07 + (d07 >> 1)); \
        const int a5  = d07 - d34 - (d25 + (d25 >> 1)); \
        const int a6  = d07 + d34 - (d16 + (d16 >> 1)); \
        const int a7  = d16 - d25 + (d34 + (d34 >> 1)); \
        DST(0, a0 + a1);                                \
        DST(1, a4 + (a7 >> 2));                         \
        DST(2, a2 + (a3 >> 1));                         \
        DST(3, a5 + (a6 >> 2));                         \
        DST(4, a0 - a1);                                \
        DST(5, a6 - (a5 >> 2));                         \
        DST(6, (a2 >> 1) - a3);                         \
        DST(7, (a4 >> 2) - a7);                         \
    }
    /* dct264_sad8x8_c: run the 1-D transform over rows, then over columns,
     * redefining SRC/DST so the second pass accumulates |coefficients| */
#define SRC(x)    dct[i][x]
#define DST(x, v) dct[i][x] = v
    for (i = 0; i < 8; i++)
        DCT8_1D
#undef SRC
#undef DST

#define SRC(x)    dct[x][i]
#define DST(x, v) sum += FFABS(v)
    for (i = 0; i < 8; i++)
        DCT8_1D
#undef SRC
#undef DST
    /* dct_max8x8_c: return the largest absolute DCT coefficient */
    for (i = 0; i < 64; i++)
        sum = FFMAX(sum, FFABS(temp[i]));
static int quant_psnr8x8_c(MPVEncContext *const s, const uint8_t *src1,
                           const uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
    int16_t *const bak = temp + 64;
    int sum = 0, i;

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);

    memcpy(bak, temp, 64 * sizeof(int16_t));

    s->c.block_last_index[0] =
        s->dct_quantize(s, temp, 0, s->c.qscale, &i);
    s->c.dct_unquantize_inter(&s->c, temp, 0, s->c.qscale);
    ff_simple_idct_int16_8bit(temp);

    for (i = 0; i < 64; i++)
        sum += (temp[i] - bak[i]) * (temp[i] - bak[i]);

    return sum;
}
static int rd8x8_c(MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2,
                   ptrdiff_t stride, int h)
{
    const uint8_t *scantable = s->c.intra_scantable.permutated;
    LOCAL_ALIGNED_16(int16_t, temp,  [64]);
    LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
    LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
    int i, last, run, bits, level, distortion, start_i;
    const int esc_length = s->ac_esc_length;
    const uint8_t *length, *last_length;

    copy_block8(lsrc1, src1, 8, stride, 8);
    copy_block8(lsrc2, src2, 8, stride, 8);

    s->pdsp.diff_pixels(temp, lsrc1, lsrc2, 8);

    s->c.block_last_index[0] =
        last                 =
        s->dct_quantize(s, temp, 0, s->c.qscale, &i);

    bits = 0;

    if (s->c.mb_intra) {
        start_i     = 1;
        length      = s->intra_ac_vlc_length;
        last_length = s->intra_ac_vlc_last_length;
        bits       += s->luma_dc_vlc_length[temp[0] + 256];
    } else {
        start_i     = 0;
        length      = s->inter_ac_vlc_length;
        last_length = s->inter_ac_vlc_last_length;
    }

    if (last >= start_i) {
        run = 0;
        for (i = start_i; i < last; i++) {
            int j = scantable[i];
            level = temp[j];

            if (level) {
                level += 64;
                if ((level & (~127)) == 0)
                    bits += length[UNI_AC_ENC_INDEX(run, level)];
                else
                    bits += esc_length;
                run = 0;
            } else
                run++;
        }
        i = scantable[last];
        level = temp[i] + 64;

        av_assert2(level - 64);

        if ((level & (~127)) == 0) {
            bits += last_length[UNI_AC_ENC_INDEX(run, level)];
        } else
            bits += esc_length;
    }

    if (last >= 0) {
        if (s->c.mb_intra)
            s->c.dct_unquantize_intra(&s->c, temp, 0, s->c.qscale);
        else
            s->c.dct_unquantize_inter(&s->c, temp, 0, s->c.qscale);
    }

    s->c.idsp.idct_add(lsrc2, 8, temp);

    distortion = s->sse_cmp[1](NULL, lsrc2, lsrc1, 8, 8);

    return distortion + ((bits * s->c.qscale * s->c.qscale * 109 + 64) >> 7);
}
static int bit8x8_c(MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2,
                    ptrdiff_t stride, int h)
{
    const uint8_t *scantable = s->c.intra_scantable.permutated;
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    int i, last, run, bits, level, start_i;
    const int esc_length = s->ac_esc_length;
    const uint8_t *length, *last_length;

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);

    s->c.block_last_index[0] =
        last                 =
        s->dct_quantize(s, temp, 0, s->c.qscale, &i);

    bits = 0;

    if (s->c.mb_intra) {
        start_i     = 1;
        length      = s->intra_ac_vlc_length;
        last_length = s->intra_ac_vlc_last_length;
        bits       += s->luma_dc_vlc_length[temp[0] + 256];
    } else {
        start_i     = 0;
        length      = s->inter_ac_vlc_length;
        last_length = s->inter_ac_vlc_last_length;
    }

    if (last >= start_i) {
        run = 0;
        for (i = start_i; i < last; i++) {
            int j = scantable[i];
            level = temp[j];

            if (level) {
                level += 64;
                if ((level & (~127)) == 0)
                    bits += length[UNI_AC_ENC_INDEX(run, level)];
                else
                    bits += esc_length;
                run = 0;
            } else
                run++;
        }
        i = scantable[last];
        level = temp[i] + 64;

        av_assert2(level - 64);

        if ((level & (~127)) == 0)
            bits += last_length[UNI_AC_ENC_INDEX(run, level)];
        else
            bits += esc_length;
    }

    return bits;
}
#define VSAD_INTRA(size)                                                    \
static int vsad_intra ## size ## _c(MPVEncContext *unused,                  \
                                    const uint8_t *s, const uint8_t *dummy, \
                                    ptrdiff_t stride, int h)                \
{                                                                           \
    int score = 0, x, y;                                                    \
                                                                            \
    for (y = 1; y < h; y++) {                                               \
        for (x = 0; x < size; x += 4) {                                     \
            score += FFABS(s[x]     - s[x + stride])     +                  \
                     FFABS(s[x + 1] - s[x + stride + 1]) +                  \
                     FFABS(s[x + 2] - s[x + 2 + stride]) +                  \
                     FFABS(s[x + 3] - s[x + 3 + stride]);                   \
        }                                                                   \
        s += stride;                                                        \
    }                                                                       \
                                                                            \
    return score;                                                           \
}
VSAD_INTRA(8)
VSAD_INTRA(16)
#define VSAD(size)                                                           \
static int vsad ## size ## _c(MPVEncContext *unused,                         \
                              const uint8_t *s1, const uint8_t *s2,          \
                              ptrdiff_t stride, int h)                       \
{                                                                            \
    int score = 0, x, y;                                                     \
                                                                             \
    for (y = 1; y < h; y++) {                                                \
        for (x = 0; x < size; x++)                                           \
            score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]); \
        s1 += stride;                                                        \
        s2 += stride;                                                        \
    }                                                                        \
                                                                             \
    return score;                                                            \
}
VSAD(8)
VSAD(16)
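/* Standalone sketch (illustrative, not part of the original file): vsad
 * measures the vertical variation of the residual s1 - s2; a plain-function
 * equivalent of the macro-generated vsad##size##_c for arbitrary width.
 * `vsad_ref` is a hypothetical name. */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

static int vsad_ref(const uint8_t *s1, const uint8_t *s2,
                    ptrdiff_t stride, int w, int h)
{
    int score = 0;

    for (int y = 1; y < h; y++) {
        for (int x = 0; x < w; x++)
            score += abs((s1[x] - s2[x]) - (s1[x + stride] - s2[x + stride]));
        s1 += stride;
        s2 += stride;
    }
    return score;
}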
#define SQ(a) ((a) * (a))

#define VSSE_INTRA(size)                                                    \
static int vsse_intra ## size ## _c(MPVEncContext *unused,                  \
                                    const uint8_t *s, const uint8_t *dummy, \
                                    ptrdiff_t stride, int h)                \
{                                                                           \
    int score = 0, x, y;                                                    \
                                                                            \
    for (y = 1; y < h; y++) {                                               \
        for (x = 0; x < size; x += 4) {                                     \
            score += SQ(s[x]     - s[x + stride])     +                     \
                     SQ(s[x + 1] - s[x + stride + 1]) +                     \
                     SQ(s[x + 2] - s[x + stride + 2]) +                     \
                     SQ(s[x + 3] - s[x + stride + 3]);                      \
        }                                                                   \
        s += stride;                                                        \
    }                                                                       \
                                                                            \
    return score;                                                           \
}
VSSE_INTRA(8)
VSSE_INTRA(16)
#define VSSE(size)                                                           \
static int vsse ## size ## _c(MPVEncContext *unused, const uint8_t *s1,      \
                              const uint8_t *s2, ptrdiff_t stride, int h)    \
{                                                                            \
    int score = 0, x, y;                                                     \
                                                                             \
    for (y = 1; y < h; y++) {                                                \
        for (x = 0; x < size; x++)                                           \
            score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);    \
        s1 += stride;                                                        \
        s2 += stride;                                                        \
    }                                                                        \
                                                                             \
    return score;                                                            \
}
VSSE(8)
VSSE(16)
#define WRAPPER8_16_SQ(name8, name16)                                   \
static int name16(MPVEncContext *const s, const uint8_t *dst,           \
                  const uint8_t *src, ptrdiff_t stride, int h)          \
{                                                                       \
    int score = 0;                                                      \
                                                                        \
    score += name8(s, dst, src, stride, 8);                             \
    score += name8(s, dst + 8, src + 8, stride, 8);                     \
    if (h == 16) {                                                      \
        dst   += 8 * stride;                                            \
        src   += 8 * stride;                                            \
        score += name8(s, dst, src, stride, 8);                         \
        score += name8(s, dst + 8, src + 8, stride, 8);                 \
    }                                                                   \
    return score;                                                       \
}
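/* Hypothetical instantiations (sketch; the original file's full list may
 * differ): the 16x16 metrics are composed from the four 8x8 quadrant calls,
 * e.g. the hadamard8_intra16_c referenced in ff_me_cmp_init() below. */
WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)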
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
{
    memset(c, 0, sizeof(*c));

    /* ... scalar C defaults (sum_abs_dctelem, sad, sse, pix_abs, ...) ... */

#define SET_CMP_FUNC(name)       \
    c->name[0] = name ## 16_c;   \
    c->name[1] = name ## 8x8_c;

    /* ... SET_CMP_FUNC applied to the DCT-based metrics ... */
    c->hadamard8_diff[4] = hadamard8_intra16_c;
    /* ... */
    c->vsad[0] = vsad16_c;
    c->vsad[1] = vsad8_c;
    c->vsad[4] = vsad_intra16_c;
    c->vsad[5] = vsad_intra8_c;
    c->vsse[0] = vsse16_c;
    c->vsse[1] = vsse8_c;
    c->vsse[4] = vsse_intra16_c;
    c->vsse[5] = vsse_intra8_c;
#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
    ff_dsputil_init_dwt(c);
#endif

    /* ... per-architecture overrides (ff_me_cmp_init_x86() etc.) follow ... */
}
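/* Hypothetical call site (sketch, not part of the original file): initialise
 * the table, select a metric, and score one 16x16 candidate block.
 * ff_me_cmp_init(), ff_set_cmp() and FF_CMP_SAD are the real internal names
 * used above; score_candidate() itself is illustrative. */
static int score_candidate(AVCodecContext *avctx,
                           const uint8_t *cur, const uint8_t *ref,
                           ptrdiff_t stride)
{
    MECmpContext mecc;
    me_cmp_func cmp[6];
    int ret;

    ff_me_cmp_init(&mecc, avctx);
    ret = ff_set_cmp(&mecc, cmp, FF_CMP_SAD, 0);
    if (ret < 0)
        return ret; /* e.g. an mpvenc-only metric requested without mpvenc */

    /* the plain C SAD ignores its MPVEncContext argument, so NULL is fine here */
    return cmp[0](NULL, cur, ref, stride, 16);
}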