#if CONFIG_LIBFREETYPE
#include FT_FREETYPE_H
#endif

#if CONFIG_LIBFONTCONFIG
#include <fontconfig/fontconfig.h>
#endif

#define BASEFREQ      20.01523126408007475
#define ENDFREQ       20495.59681441799654
#define TLENGTH       "384*tc/(384+tc*f)"
#define TLENGTH_MIN   0.001
#define VOLUME_MAX    100.0
#define FONTCOLOR     "st(0, (midi(f)-59.5)/12);" \
    "st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));" \
    "r(1-ld(1)) + b(ld(1))"
#define CSCHEME       "1|0.5|0|0|0.5|1"

#define OFFSET(x) offsetof(ShowCQTContext, x)
#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
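/* Editorial note: assuming the MIDI mapping used below (midi(f) = 69 + 12*log2(f/440)),
 * BASEFREQ and ENDFREQ sit exactly 120 semitones (10 octaves) apart, 50 cents
 * below E0 and E10 respectively. A minimal, hypothetical check:
 *
 *     double basefreq = 440.0 * pow(2.0, -53.5 / 12.0);   // ~20.01523 Hz
 *     double endfreq  = 440.0 * pow(2.0,  66.5 / 12.0);   // ~20495.597 Hz
 */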
    av_log(s->ctx, level, "fft_time = %16.3f s.\n", s->fft_time * 1e-6);
    av_log(s->ctx, level, "cqt_time = %16.3f s.\n", s->cqt_time * 1e-6);
    if (s->process_cqt_time)
        av_log(s->ctx, level, "process_cqt_time = %16.3f s.\n", s->process_cqt_time * 1e-6);
    if (s->update_sono_time)
        av_log(s->ctx, level, "update_sono_time = %16.3f s.\n", s->update_sono_time * 1e-6);
    av_log(s->ctx, level, "alloc_time = %16.3f s.\n", s->alloc_time * 1e-6);
    av_log(s->ctx, level, "bar_time = %16.3f s.\n", s->bar_time * 1e-6);
    av_log(s->ctx, level, "axis_time = %16.3f s.\n", s->axis_time * 1e-6);
    av_log(s->ctx, level, "sono_time = %16.3f s.\n", s->sono_time * 1e-6);

    plot_time = s->fft_time + s->cqt_time + s->process_cqt_time + s->update_sono_time
              + s->alloc_time + s->bar_time + s->axis_time + s->sono_time;
    av_log(s->ctx, level, "plot_time = %16.3f s.\n", plot_time * 1e-6);

    s->fft_time = s->cqt_time = s->process_cqt_time = s->update_sono_time
                = s->alloc_time = s->bar_time = s->axis_time = s->sono_time = 0;
    if (s->axis_frame && !s->axis_frame->buf[0]) {
        for (k = 0; k < 4; k++)
            s->axis_frame->data[k] = NULL;
    }

    for (k = 0; k < s->cqt_len; k++)
    double log_base, log_end;
    double rcp_n = 1.0 / n;

    for (x = 0; x < n; x++) {
        double log_freq = log_base + (x + 0.5) * (log_end - log_base) * rcp_n;
        freq[x] = exp(log_freq);
    }
static double clip_with_log(void *log_ctx, const char *name,
                            double val, double min, double max,
                            double nan_replace, int idx)

        av_log(log_ctx, level, "[%d] %s is nan, setting it to %g.\n",
               idx, name, nan_replace);

        av_log(log_ctx, level, "[%d] %s is too low (%g), setting it to %g.\n",
               idx, name, val, min);

        av_log(log_ctx, level, "[%d] %s is too high (%g), setting it to %g.\n",
               idx, name, val, max);
static double a_weighting(void *p, double f)
{
    double ret = 12200.0*12200.0 * (f*f*f*f);
    ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) *
           sqrt((f*f + 107.7*107.7) * (f*f + 737.9*737.9));
    return ret;
}

static double b_weighting(void *p, double f)
{
    double ret = 12200.0*12200.0 * (f*f*f);
    ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) * sqrt(f*f + 158.5*158.5);
    return ret;
}

static double c_weighting(void *p, double f)
{
    double ret = 12200.0*12200.0 * (f*f);
    ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0);
    return ret;
}
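/* Editorial note: these follow the standard A/B/C acoustic weighting curves
 * (IEC 61672 style), written out directly from the code above, e.g. for A:
 *
 *     A(f) = 12200^2 * f^4 /
 *            ((f^2 + 20.6^2) * sqrt((f^2 + 107.7^2) * (f^2 + 737.9^2)) * (f^2 + 12200^2))
 *
 * They are exposed to the bar_v/sono_v expressions below, where the evaluated
 * volume is squared (vol * vol) before use.
 */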
    const char *func_names[] = { "a_weighting", "b_weighting", "c_weighting", NULL };
    const char *sono_names[] = { "timeclamp", "tc", "frequency", "freq", "f", "bar_v", NULL };
    const char *bar_names[]  = { "timeclamp", "tc", "frequency", "freq", "f", "sono_v", NULL };
    if (!s->sono_v_buf || !s->bar_v_buf)

    for (x = 0; x < s->cqt_len; x++) {
        double vars[] = { s->timeclamp, s->timeclamp, s->freq[x], s->freq[x], s->freq[x], 0.0 };

        s->bar_v_buf[x] = vol * vol;

        s->sono_v_buf[x] = vol * vol;
    }
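/* Editorial note: bar_v and sono_v are user expressions evaluated once per CQT
 * bin with tc and f available, plus the a/b/c_weighting() helpers registered
 * above. A hypothetical invocation using the documented option names:
 *
 *     ffplay -f lavfi "amovie=input.flac, asplit [a][out1];
 *                      [a] showcqt=sono_v=16*a_weighting(f):bar_v=sono_v [out0]"
 */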
static void cqt_calc(AVComplexFloat *dst, const AVComplexFloat *src, const Coeffs *coeffs,
                     int len, int fft_len)

    for (k = 0; k < len; k++) {

        for (x = 0; x < coeffs[k].len; x++) {
            float u = coeffs[k].val[x];

            b.re += u * src[j].re;
            b.im += u * src[j].im;

        dst[k].im = r.re * r.re + r.im * r.im;
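/* Editorial note: each CQT bin is a weighted sum over a short run of FFT bins
 * centered on the bin frequency, and only squared magnitudes are kept.
 * Schematically (ignoring the stereo separation done via FFT symmetry in the
 * real kernel):
 *
 *     X[k]   = sum_x coeffs[k].val[x] * FFT[coeffs[k].start + x]
 *     dst[k] = { |X_left[k]|^2, |X_right[k]|^2 }   packed into .re / .im
 */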
    const char *var_names[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };

    int rate = s->ctx->inputs[0]->sample_rate;
    int nb_cqt_coeffs = 0;

    if (!(s->coeffs = av_calloc(s->cqt_len, sizeof(*s->coeffs))))

    for (k = 0; k < s->cqt_len; k++) {
        double vars[] = { s->timeclamp, s->timeclamp, s->freq[k], s->freq[k], s->freq[k] };
        double flen, center, tlength;
        int start, end, m = k;

        if (s->freq[k] > 0.5 * rate)

        flen = 8.0 * s->fft_len / (tlength * rate);
        center = s->freq[k] * s->fft_len / rate;
        start = FFMAX(0, ceil(center - 0.5 * flen));
        end = FFMIN(s->fft_len, floor(center + 0.5 * flen));

        s->coeffs[m].start = start & ~(s->cqt_align - 1);
        s->coeffs[m].len = (end | (s->cqt_align - 1)) + 1 - s->coeffs[m].start;
        nb_cqt_coeffs += s->coeffs[m].len;
        if (!(s->coeffs[m].val = av_calloc(s->coeffs[m].len, sizeof(*s->coeffs[m].val))))

        for (x = start; x <= end; x++) {
            int sign = (x & 1) ? (-1) : 1;
            double y = 2.0 * M_PI * (x - center) * (1.0 / flen);
            double w = 0.355768 + 0.487396 * cos(y) + 0.144232 * cos(2*y) + 0.012604 * cos(3*y);
            w *= sign * (1.0 / s->fft_len);
            s->coeffs[m].val[x - s->coeffs[m].start] = w;
        }

        if (s->permute_coeffs)
            s->permute_coeffs(s->coeffs[m].val, s->coeffs[m].len);
    }
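/* Editorial note: 0.355768, 0.487396, 0.144232, 0.012604 are the Nuttall
 * window coefficients. The alternating 'sign' multiplies bin x by (-1)^x,
 * which is a half-length circular shift in the time domain and accounts for
 * the input block being centered at fft_len/2; the 1.0/fft_len factor
 * normalizes the implicit inverse transform.
 */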
    for (k = 0; k < s->cqt_len; k++)

        memset(out->data[0], 0, out->linesize[0] * h);

        memset(out->data[0], 16, out->linesize[0] * h);
        memset(out->data[1], 128, out->linesize[1] * hh);
        memset(out->data[2], 128, out->linesize[2] * hh);

        memset(out->data[3], 0, out->linesize[3] * h);
    uint8_t *tmp_data[4] = { NULL };

    int tmp_w, tmp_h, ret;

    if ((ret = ff_load_image(tmp_data, tmp_linesize, &tmp_w, &tmp_h, &tmp_format,
                             s->axisfile, s->ctx)) < 0)

                             tmp_format, s->ctx)) < 0)

    s->axis_frame->width  = s->width;
    s->axis_frame->height = s->axis_h;
static double midi(void *p, double f)
{
    return log2(f/440.0) * 12.0 + 69.0;
}

    return lrint(x*255.0) << 16;    /* r_func(): red component used by FONTCOLOR */
    return lrint(x*255.0) << 8;     /* g_func(): green component */
    return lrint(x*255.0);          /* b_func(): blue component */
    const char *var_names[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };
    const char *func_names[] = { "midi", "r", "g", "b", NULL };

        av_log(s->ctx, AV_LOG_WARNING,
               "font axis rendering is not implemented in non-default frequency range,"
               " please use axisfile option instead.\n");

    if (s->cqt_len == 1920)

        double vars[] = { s->timeclamp, s->timeclamp, freq[xs], freq[xs], freq[xs] };
        int linesize = tmp->linesize[0];
        for (y = 0; y < height; y++) {
            data[linesize * y + 4 * x]     = r;
            data[linesize * y + 4 * x + 1] = g;
            data[linesize * y + 4 * x + 2] = b;
        }
#if CONFIG_LIBFREETYPE
    const char *str = "EF G A BC D ";
    int linesize = tmp->linesize[0];
    FT_Library lib = NULL;
    int font_width = 16, font_height = 32;
    int font_repeat = font_width * 12;
    int linear_hori_advance = font_width * 65536;
    int non_monospace_warning = 0;

    if (FT_Init_FreeType(&lib))

    if (FT_New_Face(lib, fontfile, 0, &face))

    if (FT_Set_Char_Size(face, 16*64, 0, 0, 0))

    if (FT_Load_Char(face, 'A', FT_LOAD_RENDER))

    if (FT_Set_Char_Size(face, 16*64 * linear_hori_advance / face->glyph->linearHoriAdvance, 0, 0, 0))

    for (x = 0; x < 12; x++) {
        int sx, sy, rx, bx, by, dx, dy;

        if (FT_Load_Char(face, str[x], FT_LOAD_RENDER))

        if (face->glyph->advance.x != font_width*64 && !non_monospace_warning) {
            non_monospace_warning = 1;
        }

        sy = font_height - 8 - face->glyph->bitmap_top;
        for (rx = 0; rx < 10; rx++) {
            sx = rx * font_repeat + x * font_width + face->glyph->bitmap_left;
            for (by = 0; by < face->glyph->bitmap.rows; by++) {

                if (dy >= font_height)

                for (bx = 0; bx < face->glyph->bitmap.width; bx++) {

                    data[dy*linesize+4*dx+3] = face->glyph->bitmap.buffer[by*face->glyph->bitmap.width+bx];
                }
            }
        }
    }

    FT_Done_FreeType(lib);

    FT_Done_FreeType(lib);
#if CONFIG_LIBFONTCONFIG
    FcConfig *fontconfig;
    FcPattern *pat, *best;
    FcResult result = FcResultMatch;

    for (i = 0; font[i]; i++) {

    if (!(fontconfig = FcInitLoadConfigAndFonts())) {

    if (!(pat = FcNameParse((uint8_t *)font))) {
        FcConfigDestroy(fontconfig);
    }

    FcDefaultSubstitute(pat);

    if (!FcConfigSubstitute(fontconfig, pat, FcMatchPattern)) {
        FcPatternDestroy(pat);
        FcConfigDestroy(fontconfig);
    }

    best = FcFontMatch(fontconfig, pat, &result);
    FcPatternDestroy(pat);

    if (!best || result != FcResultMatch) {

    if (FcPatternGetString(best, FC_FILE, 0, (FcChar8 **)&filename) != FcResultMatch) {

    FcPatternDestroy(best);
    FcConfigDestroy(fontconfig);
    const char *str = "EF G A BC D ";
    int linesize = tmp->linesize[0];

        uint8_t *startptr = data + 4 * x;
        for (u = 0; u < 12; u++) {
            for (v = 0; v < height; v++) {
                uint8_t *p = startptr + v * linesize + height/2 * 4 * u;

                if (mask & vga16_font[str[u] * 16 + v])
    int default_font = 0;

    s->axis_frame->width  = s->width;
    s->axis_frame->height = s->axis_h;

    return expf(logf(v) / g);    /* calculate_gamma(): equivalent to pow(v, 1.0f / g) */
    for (x = 0; x < len; x++) {

    for (x = 0; x < len; x++) {

        c[x].yuv.y = cm[0][0] * r + cm[0][1] * g + cm[0][2] * b;
        c[x].yuv.u = cm[1][0] * r + cm[1][1] * g + cm[1][2] * b;
        c[x].yuv.v = cm[2][0] * r + cm[2][1] * g + cm[2][2] * b;
    int x, y, w = out->width;
    float mul, ht, rcp_bar_h = 1.0f / bar_h, rcp_bar_t = 1.0f / bar_t;
    uint8_t *v = out->data[0], *lp;
    int ls = out->linesize[0];

    for (y = 0; y < bar_h; y++) {
        ht = (bar_h - y) * rcp_bar_h;

        for (x = 0; x < w; x++) {

                mul = (h[x] - ht) * rcp_h[x];
                mul = (mul < bar_t) ? (mul * rcp_bar_t) : 1.0f;
#define DRAW_BAR_WITH_CHROMA(x) \
        mul = (h[x] - ht) * rcp_h[x]; \
        mul = (mul < bar_t) ? (mul * rcp_bar_t) : 1.0f; \
        *lpy++ = lrintf(mul * c[x].yuv.y + 16.0f); \
        *lpu++ = lrintf(mul * c[x].yuv.u + 128.0f); \
        *lpv++ = lrintf(mul * c[x].yuv.v + 128.0f); \

#define DRAW_BAR_WITHOUT_CHROMA(x) \
        mul = (h[x] - ht) * rcp_h[x]; \
        mul = (mul < bar_t) ? (mul * rcp_bar_t) : 1.0f; \
        *lpy++ = lrintf(mul * c[x].yuv.y + 16.0f); \
    int x, y, yh, w = out->width;
    float mul, ht, rcp_bar_h = 1.0f / bar_h, rcp_bar_t = 1.0f / bar_t;
    uint8_t *vy = out->data[0], *vu = out->data[1], *vv = out->data[2];
    uint8_t *lpy, *lpu, *lpv;
    int lsy = out->linesize[0], lsu = out->linesize[1], lsv = out->linesize[2];
    int fmt = out->format;

    for (y = 0; y < bar_h; y += 2) {

        ht = (bar_h - y) * rcp_bar_h;

        for (x = 0; x < w; x += 2) {

        for (x = 0; x < w; x += 2) {

        ht = (bar_h - (y+1)) * rcp_bar_h;
        lpy = vy + (y+1) * lsy;
        lpu = vu + (y+1) * lsu;
        lpv = vv + (y+1) * lsv;

        for (x = 0; x < w; x += 2) {

        for (x = 0; x < w; x += 2) {

        for (x = 0; x < w; x += 2) {
    float a, rcp_255 = 1.0f / 255.0f;

    for (y = 0; y < h; y++) {
        lp = out->data[0] + (off + y) * out->linesize[0];

        for (x = 0; x < w; x++) {

            } else if (lpa[3] == 255) {

                a = rcp_255 * lpa[3];
#define BLEND_WITH_CHROMA(c) \
        *lpy = lrintf(c.yuv.y + 16.0f); \
        *lpu = lrintf(c.yuv.u + 128.0f); \
        *lpv = lrintf(c.yuv.v + 128.0f); \
    } else if (255 == *lpaa) { \
        float a = (1.0f/255.0f) * (*lpaa); \
        *lpy = lrintf(a * (*lpay) + (1.0f - a) * (c.yuv.y + 16.0f)); \
        *lpu = lrintf(a * (*lpau) + (1.0f - a) * (c.yuv.u + 128.0f)); \
        *lpv = lrintf(a * (*lpav) + (1.0f - a) * (c.yuv.v + 128.0f)); \
    lpy++; lpu++; lpv++; \
    lpay++; lpau++; lpav++; lpaa++; \

#define BLEND_WITHOUT_CHROMA(c, alpha_inc) \
        *lpy = lrintf(c.yuv.y + 16.0f); \
    } else if (255 == *lpaa) { \
        float a = (1.0f/255.0f) * (*lpaa); \
        *lpy = lrintf(a * (*lpay) + (1.0f - a) * (c.yuv.y + 16.0f)); \
    lpay++; lpaa += alpha_inc; \

#define BLEND_CHROMA2(c) \
    if (!lpaa[0] && !lpaa[1]) { \
        *lpu = lrintf(c.yuv.u + 128.0f); \
        *lpv = lrintf(c.yuv.v + 128.0f); \
    } else if (255 == lpaa[0] && 255 == lpaa[1]) { \
        *lpu = *lpau; *lpv = *lpav; \
    } else { \
        float a0 = (0.5f/255.0f) * lpaa[0]; \
        float a1 = (0.5f/255.0f) * lpaa[1]; \
        float b = 1.0f - a0 - a1; \
        *lpu = lrintf(a0 * lpau[0] + a1 * lpau[1] + b * (c.yuv.u + 128.0f)); \
        *lpv = lrintf(a0 * lpav[0] + a1 * lpav[1] + b * (c.yuv.v + 128.0f)); \
    } \
    lpau += 2; lpav += 2; lpaa++; lpu++; lpv++; \

#define BLEND_CHROMA2x2(c) \
    if (!lpaa[0] && !lpaa[1] && !lpaa[lsaa] && !lpaa[lsaa+1]) { \
        *lpu = lrintf(c.yuv.u + 128.0f); \
        *lpv = lrintf(c.yuv.v + 128.0f); \
    } else if (255 == lpaa[0] && 255 == lpaa[1] && \
               255 == lpaa[lsaa] && 255 == lpaa[lsaa+1]) { \
        *lpu = *lpau; *lpv = *lpav; \
    } else { \
        float a0 = (0.25f/255.0f) * lpaa[0]; \
        float a1 = (0.25f/255.0f) * lpaa[1]; \
        float a2 = (0.25f/255.0f) * lpaa[lsaa]; \
        float a3 = (0.25f/255.0f) * lpaa[lsaa+1]; \
        float b = 1.0f - a0 - a1 - a2 - a3; \
        *lpu = lrintf(a0 * lpau[0] + a1 * lpau[1] + a2 * lpau[lsau] + a3 * lpau[lsau+1] \
                      + b * (c.yuv.u + 128.0f)); \
        *lpv = lrintf(a0 * lpav[0] + a1 * lpav[1] + a2 * lpav[lsav] + a3 * lpav[lsav+1] \
                      + b * (c.yuv.v + 128.0f)); \
    } \
    lpau += 2; lpav += 2; lpaa++; lpu++; lpv++; \
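/* Editorial note: the BLEND_* macros composite the pre-rendered axis (with its
 * alpha plane) over the bar color c. For subsampled chroma, the alphas of the
 * 2 (4:2:2) or 2x2 (4:2:0) covered luma positions are averaged, roughly:
 *
 *     u_out = sum_i a_i * u_axis_i + (1 - sum_i a_i) * (c.yuv.u + 128),
 *             with a_i = alpha_i / (255 * N)
 */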
    uint8_t *vy = out->data[0], *vu = out->data[1], *vv = out->data[2];
    uint8_t *vay = axis->data[0], *vau = axis->data[1], *vav = axis->data[2], *vaa = axis->data[3];
    int lsy = out->linesize[0], lsu = out->linesize[1], lsv = out->linesize[2];

    uint8_t *lpy, *lpu, *lpv, *lpay, *lpau, *lpav, *lpaa;

    for (y = 0; y < h; y += 2) {

        lpy = vy + (off + y) * lsy;
        lpu = vu + (offh + yh) * lsu;
        lpv = vv + (offh + yh) * lsv;
        lpay = vay + y * lsay;
        lpau = vau + y * lsau;
        lpav = vav + y * lsav;
        lpaa = vaa + y * lsaa;

        for (x = 0; x < w; x += 2) {

        for (x = 0; x < w; x += 2) {

        for (x = 0; x < w; x += 2) {

        lpy = vy + (off + y + 1) * lsy;
        lpu = vu + (off + y + 1) * lsu;
        lpv = vv + (off + y + 1) * lsv;
        lpay = vay + (y + 1) * lsay;
        lpau = vau + (y + 1) * lsau;
        lpav = vav + (y + 1) * lsav;
        lpaa = vaa + (y + 1) * lsaa;

        for (x = 0; x < w; x += 2) {

        for (x = 0; x < w; x += 2) {

        for (x = 0; x < w; x += 2) {
    for (y = 0; y < h; y++) {
        memcpy(out->data[0] + (off + y) * out->linesize[0],

    for (i = 1; i < nb_planes; i++) {

        for (y = 0; y < h; y += inc) {

            memcpy(out->data[i] + (offh + yh) * out->linesize[i],

    for (x = 0; x < w; x++) {
    uint8_t *lpy = sono->data[0] + idx * sono->linesize[0];
    uint8_t *lpu = sono->data[1] + idx * sono->linesize[1];
    uint8_t *lpv = sono->data[2] + idx * sono->linesize[2];

    for (x = 0; x < w; x += 2) {
        *lpy++ = lrintf(c[x].yuv.y + 16.0f);
        *lpu++ = lrintf(c[x].yuv.u + 128.0f);
        *lpv++ = lrintf(c[x].yuv.v + 128.0f);
        *lpy++ = lrintf(c[x+1].yuv.y + 16.0f);

        *lpu++ = lrintf(c[x+1].yuv.u + 128.0f);
        *lpv++ = lrintf(c[x+1].yuv.v + 128.0f);
    }
    if (!s->sono_count) {
        for (x = 0; x < s->cqt_len; x++) {
            s->h_buf[x] = s->bar_v_buf[x] * 0.5f * (s->cqt_result[x].re + s->cqt_result[x].im);
        }

        if (s->fcount > 1) {
            float rcp_fcount = 1.0f / s->fcount;
            for (x = 0; x < s->width; x++) {
                for (i = 0; i < s->fcount; i++)
                    h += s->h_buf[s->fcount * x + i];
                s->h_buf[x] = rcp_fcount * h;
            }
        }

        for (x = 0; x < s->width; x++) {
            s->rcp_h_buf[x] = 1.0f / (s->h_buf[x] + 0.0001f);
        }
    }

    for (x = 0; x < s->cqt_len; x++) {
        s->cqt_result[x].re *= s->sono_v_buf[x];
        s->cqt_result[x].im *= s->sono_v_buf[x];
    }

    if (s->fcount > 1) {
        float rcp_fcount = 1.0f / s->fcount;
        for (x = 0; x < s->width; x++) {
            for (i = 0; i < s->fcount; i++) {
                result.re += s->cqt_result[s->fcount * x + i].re;
                result.im += s->cqt_result[s->fcount * x + i].im;
            }
            s->cqt_result[x].re = rcp_fcount * result.re;
            s->cqt_result[x].im = rcp_fcount * result.im;
        }
    }

    yuv_from_cqt(s->c_buf, s->cqt_result, s->sono_g, s->width, s->cmatrix, s->cscheme_v);
#define UPDATE_TIME(t) \
    cur_time = av_gettime_relative(); \
    t += cur_time - last_time; \
    last_time = cur_time
    memcpy(s->fft_input, s->fft_data, s->fft_len * sizeof(*s->fft_data));
    if (s->attack_data) {
        for (k = 0; k < s->remaining_fill_max; k++) {
            s->fft_input[s->fft_len/2+k].re *= s->attack_data[k];
            s->fft_input[s->fft_len/2+k].im *= s->attack_data[k];
        }
    }

    s->cqt_calc(s->cqt_result, s->fft_result, s->coeffs, s->cqt_len, s->fft_len);

    s->update_sono(s->sono_frame, s->c_buf, s->sono_idx);

    if (!s->sono_count) {

    out->colorspace = s->csp;

    s->draw_bar(out, s->h_buf, s->rcp_h_buf, s->c_buf, s->bar_h, s->bar_t);

    s->draw_axis(out, s->axis_frame, s->c_buf, s->bar_h);

    s->draw_sono(out, s->sono_frame, s->bar_h + s->axis_h, s->sono_idx);

    s->sono_count = (s->sono_count + 1) % s->count;

    s->sono_idx = (s->sono_idx + s->sono_h - 1) % s->sono_h;
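/* Editorial note: each output frame is composed top to bottom as bargraph
 * (bar_h rows), axis (axis_h rows) and sonogram (sono_h rows); sono_idx walks
 * backwards through the circular sonogram buffer so the sonogram scrolls by
 * one line per plotted frame.
 */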
        kr = 0.299;  kb = 0.114;  break;   /* BT.601 (BT470BG / SMPTE 170M) */

        kr = 0.2126; kb = 0.0722; break;   /* BT.709 */

        kr = 0.30;   kb = 0.11;   break;   /* FCC */

        kr = 0.212;  kb = 0.087;  break;   /* SMPTE 240M */

        kr = 0.2627; kb = 0.0593; break;   /* BT.2020 non-constant luminance */

    s->cmatrix[0][0] = 219.0 * kr;
    s->cmatrix[0][1] = 219.0 * kg;
    s->cmatrix[0][2] = 219.0 * kb;
    s->cmatrix[1][0] = -112.0 * kr / (1.0 - kb);
    s->cmatrix[1][1] = -112.0 * kg / (1.0 - kb);
    s->cmatrix[1][2] = 112.0;
    s->cmatrix[2][0] = 112.0;
    s->cmatrix[2][1] = -112.0 * kg / (1.0 - kr);
    s->cmatrix[2][2] = -112.0 * kb / (1.0 - kr);
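/* Editorial note: with kg = 1 - kr - kb this is the usual limited-range
 * RGB -> YCbCr matrix for float RGB in [0,1]:
 *
 *     Y  = 219 * (kr*R + kg*G + kb*B)
 *     Cb = 112 * (B - Y/219) / (1 - kb),   Cr = 112 * (R - Y/219) / (1 - kr)
 *
 * The +16 (luma) and +128 (chroma) offsets are added later in the drawing
 * code, not in this matrix.
 */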
    if (sscanf(s->cscheme, " %f | %f | %f | %f | %f | %f %1s", &s->cscheme_v[0],
               &s->cscheme_v[1], &s->cscheme_v[2], &s->cscheme_v[3], &s->cscheme_v[4],
               &s->cscheme_v[5], tail) != 6)

    for (k = 0; k < 6; k++)
        if (isnan(s->cscheme_v[k]) || s->cscheme_v[k] < 0.0f || s->cscheme_v[k] > 1.0f)
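/* Editorial note: cscheme is six '|'-separated values in [0,1], documented as
 * left_r|left_g|left_b|right_r|right_g|right_b. The default CSCHEME
 * "1|0.5|0|0|0.5|1" therefore tints the left channel towards orange and the
 * right channel towards azure. Hypothetical usage:
 *
 *     showcqt=cscheme='1|0|0|0|0|1'    (red left channel, blue right channel)
 */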
    if (s->width != 1920 || s->height != 1080) {

    if (s->axis_h < 0) {
        s->axis_h = s->width / 60;
        if (s->bar_h >= 0 && s->sono_h >= 0)
            s->axis_h = s->height - s->bar_h - s->sono_h;
        if (s->bar_h >= 0 && s->sono_h < 0)
            s->axis_h = FFMIN(s->axis_h, s->height - s->bar_h);
        if (s->bar_h < 0 && s->sono_h >= 0)
            s->axis_h = FFMIN(s->axis_h, s->height - s->sono_h);
    }

        s->bar_h = (s->height - s->axis_h) / 2;

        s->bar_h = s->height - s->sono_h - s->axis_h;

        s->sono_h = s->height - s->axis_h - s->bar_h;

    if ((s->width & 1) || (s->height & 1) || (s->bar_h & 1) || (s->axis_h & 1) || (s->sono_h & 1) ||
        (s->bar_h < 0) || (s->axis_h < 0) || (s->sono_h < 0) || (s->bar_h > s->height) ||
        (s->axis_h > s->height) || (s->sono_h > s->height) || (s->bar_h + s->axis_h + s->sono_h != s->height)) {

    } while (s->fcount * s->width < 1920 && s->fcount < 10);

    outlink->w = s->width;
    outlink->h = s->height;

           s->bar_h, s->axis_h, s->sono_h);

    s->cqt_len = s->width * s->fcount;
    s->fft_len = 1 << s->fft_bits;

    s->fft_data = av_calloc(s->fft_len, sizeof(*s->fft_data));

    if (!s->fft_ctx || !s->fft_data || !s->fft_result || !s->cqt_result)

    s->remaining_fill_max = s->fft_len / 2;
    if (s->attack > 0.0) {

        s->remaining_fill_max = FFMIN(s->remaining_fill_max, ceil(inlink->sample_rate * s->attack));

        if (!s->attack_data)

        for (k = 0; k < s->remaining_fill_max; k++) {
            double y = M_PI * k / (inlink->sample_rate * s->attack);
            s->attack_data[k] = 0.355768 + 0.487396 * cos(y) + 0.144232 * cos(2*y) + 0.012604 * cos(3*y);
        }
    }

    s->permute_coeffs = NULL;
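/* Editorial note: attack_data is the decaying half of the same Nuttall window,
 * applied in plot_cqt() above to the not-yet-played half of the FFT input, so
 * a sound cannot appear on screen more than 'attack' seconds before it is
 * audible; this trades window symmetry for lower visual latency.
 */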
    } else if (s->axisfile) {

    if (!s->h_buf || !s->rcp_h_buf || !s->c_buf)

    s->remaining_fill = s->remaining_fill_max;
    s->remaining_frac = 0;

    s->step = (int)(s->step_frac.num / s->step_frac.den);
    s->step_frac.num %= s->step_frac.den;
    if (s->step_frac.num) {
               inlink->sample_rate, s->step, s->step_frac.num, s->step_frac.den);

               inlink->sample_rate, s->step);
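/* Editorial note: step_frac carries the non-integer part of
 * sample_rate / frame_rate so frame boundaries stay sample accurate. The
 * per-frame advance used below is, schematically:
 *
 *     step              = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
 *     s->remaining_frac = (s->step_frac.num + s->remaining_frac) % s->step_frac.den;
 */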
    int remaining, step, ret, x, i, j, m, got_frame = 0;

    while (s->remaining_fill < s->remaining_fill_max) {
        memset(&s->fft_data[s->fft_len/2 + s->remaining_fill_max - s->remaining_fill], 0,
               sizeof(*s->fft_data) * s->remaining_fill);

        step = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
        s->remaining_frac = (s->step_frac.num + s->remaining_frac) % s->step_frac.den;
        for (x = 0; x < (s->fft_len/2 + s->remaining_fill_max - step); x++)
            s->fft_data[x] = s->fft_data[x+step];
        s->remaining_fill += step;

        out->pts = s->next_pts;

    audio_data = (float *) insamples->data[0];

        j = s->fft_len/2 + s->remaining_fill_max - s->remaining_fill;
        if (remaining >= s->remaining_fill) {
            for (m = 0; m < s->remaining_fill; m++) {
                /* packed stereo float input: left channel -> re, right channel -> im */
                s->fft_data[j+m].re = audio_data[2*(i+m)];
                s->fft_data[j+m].im = audio_data[2*(i+m)+1];
            }

            remaining -= s->remaining_fill;

            step = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
            s->remaining_frac = (s->step_frac.num + s->remaining_frac) % s->step_frac.den;
            for (m = 0; m < s->fft_len/2 + s->remaining_fill_max - step; m++)
                s->fft_data[m] = s->fft_data[m+step];
            s->remaining_fill = step;
        } else {
            for (m = 0; m < remaining; m++) {
                s->fft_data[j+m].re = audio_data[2*(i+m)];
                s->fft_data[j+m].im = audio_data[2*(i+m)+1];
            }
            s->remaining_fill -= remaining;
        }

    nb_samples = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
const FFFilter ff_avf_showcqt = {
    .p.name        = "showcqt",
    .p.description = NULL_IF_CONFIG_SMALL("Convert input audio to a CQT (Constant/Clamped Q Transform) spectrum video output."),
    .p.priv_class  = &showcqt_class,