FFmpeg
avf_showcqt.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2014-2015 Muhammad Faiz <mfcc64@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "config.h"
22 #include "libavutil/attributes.h"
23 #include "libavutil/mem.h"
24 #include "libavutil/tx.h"
26 #include "libavutil/opt.h"
28 #include "libavutil/eval.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/time.h"
31 #include "audio.h"
32 #include "avfilter.h"
33 #include "filters.h"
34 #include "formats.h"
35 #include "lavfutils.h"
36 #include "lswsutils.h"
37 #include "video.h"
38 
39 #if CONFIG_LIBFREETYPE
40 #include <ft2build.h>
41 #include FT_FREETYPE_H
42 #endif
43 
44 #if CONFIG_LIBFONTCONFIG
45 #include <fontconfig/fontconfig.h>
46 #endif
47 
48 #include "avf_showcqt.h"
49 
50 #define BASEFREQ 20.01523126408007475
51 #define ENDFREQ 20495.59681441799654
52 #define TLENGTH "384*tc/(384+tc*f)"
53 #define TLENGTH_MIN 0.001
54 #define VOLUME_MAX 100.0
55 #define FONTCOLOR "st(0, (midi(f)-59.5)/12);" \
56  "st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));" \
57  "r(1-ld(1)) + b(ld(1))"
58 #define CSCHEME "1|0.5|0|0|0.5|1"
59 
60 #define OFFSET(x) offsetof(ShowCQTContext, x)
61 #define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
62 
/* Option table: every user-tunable knob of the showcqt filter.
 * Several options have aliases (e.g. "fps"/"rate"/"r", "sono_v"/"volume")
 * kept for backward compatibility; aliased entries share the same OFFSET.
 * String-valued volume/tlength/fontcolor options are expressions parsed
 * later with av_expr_parse(). */
static const AVOption showcqt_options[] = {
    /* geometry */
    { "size", "set video size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, { .str = "1920x1080" }, 0, 0, FLAGS },
    { "s", "set video size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, { .str = "1920x1080" }, 0, 0, FLAGS },
    { "fps", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 1, 1000, FLAGS },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 1, 1000, FLAGS },
    { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 1, 1000, FLAGS },
    /* -1 means "derive from total height" for the three sub-areas */
    { "bar_h", "set bargraph height", OFFSET(bar_h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
    { "axis_h", "set axis height", OFFSET(axis_h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
    { "sono_h", "set sonogram height", OFFSET(sono_h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
    { "fullhd", "set fullhd size", OFFSET(fullhd), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
    /* volume/gamma (sono_v and bar_v are expressions, may reference each other) */
    { "sono_v", "set sonogram volume", OFFSET(sono_v), AV_OPT_TYPE_STRING, { .str = "16" }, 0, 0, FLAGS },
    { "volume", "set sonogram volume", OFFSET(sono_v), AV_OPT_TYPE_STRING, { .str = "16" }, 0, 0, FLAGS },
    { "bar_v", "set bargraph volume", OFFSET(bar_v), AV_OPT_TYPE_STRING, { .str = "sono_v" }, 0, 0, FLAGS },
    { "volume2", "set bargraph volume", OFFSET(bar_v), AV_OPT_TYPE_STRING, { .str = "sono_v" }, 0, 0, FLAGS },
    { "sono_g", "set sonogram gamma", OFFSET(sono_g), AV_OPT_TYPE_FLOAT, { .dbl = 3.0 }, 1.0, 7.0, FLAGS },
    { "gamma", "set sonogram gamma", OFFSET(sono_g), AV_OPT_TYPE_FLOAT, { .dbl = 3.0 }, 1.0, 7.0, FLAGS },
    { "bar_g", "set bargraph gamma", OFFSET(bar_g), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 1.0, 7.0, FLAGS },
    { "gamma2", "set bargraph gamma", OFFSET(bar_g), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 1.0, 7.0, FLAGS },
    { "bar_t", "set bar transparency", OFFSET(bar_t), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 0.0, 1.0, FLAGS },
    /* transform parameters */
    { "timeclamp", "set timeclamp", OFFSET(timeclamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 }, 0.002, 1.0, FLAGS },
    { "tc", "set timeclamp", OFFSET(timeclamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 }, 0.002, 1.0, FLAGS },
    { "attack", "set attack time", OFFSET(attack), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0.0, 1.0, FLAGS },
    { "basefreq", "set base frequency", OFFSET(basefreq), AV_OPT_TYPE_DOUBLE, { .dbl = BASEFREQ }, 10.0, 100000.0, FLAGS },
    { "endfreq", "set end frequency", OFFSET(endfreq), AV_OPT_TYPE_DOUBLE, { .dbl = ENDFREQ }, 10.0, 100000.0, FLAGS },
    { "coeffclamp", "set coeffclamp", OFFSET(coeffclamp), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 0.1, 10.0, FLAGS },
    { "tlength", "set tlength", OFFSET(tlength), AV_OPT_TYPE_STRING, { .str = TLENGTH }, 0, 0, FLAGS },
    { "count", "set transform count", OFFSET(count), AV_OPT_TYPE_INT, { .i64 = 6 }, 1, 30, FLAGS },
    { "fcount", "set frequency count", OFFSET(fcount), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 10, FLAGS },
    /* axis rendering */
    { "fontfile", "set axis font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "font", "set axis font", OFFSET(font), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "fontcolor", "set font color", OFFSET(fontcolor), AV_OPT_TYPE_STRING, { .str = FONTCOLOR }, 0, 0, FLAGS },
    { "axisfile", "set axis image", OFFSET(axisfile), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "axis", "draw axis", OFFSET(axis), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
    { "text", "draw axis", OFFSET(axis), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
    /* output colorimetry */
    { "csp", "set color space", OFFSET(csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED }, 0, INT_MAX, FLAGS, .unit = "csp" },
    { "unspecified", "unspecified", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_UNSPECIFIED }, 0, 0, FLAGS, .unit = "csp" },
    { "bt709", "bt709", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT709 }, 0, 0, FLAGS, .unit = "csp" },
    { "fcc", "fcc", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_FCC }, 0, 0, FLAGS, .unit = "csp" },
    { "bt470bg", "bt470bg", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT470BG }, 0, 0, FLAGS, .unit = "csp" },
    { "smpte170m", "smpte170m", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_SMPTE170M }, 0, 0, FLAGS, .unit = "csp" },
    { "smpte240m", "smpte240m", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_SMPTE240M }, 0, 0, FLAGS, .unit = "csp" },
    { "bt2020ncl", "bt2020ncl", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT2020_NCL }, 0, 0, FLAGS, .unit = "csp" },
    { "cscheme", "set color scheme", OFFSET(cscheme), AV_OPT_TYPE_STRING, { .str = CSCHEME }, 0, 0, FLAGS },
    { NULL }
};
108 
109 AVFILTER_DEFINE_CLASS(showcqt);
110 
112 {
113  int k;
114  int level = AV_LOG_DEBUG;
115  int64_t plot_time;
116 
117  if (s->fft_time)
118  av_log(s->ctx, level, "fft_time = %16.3f s.\n", s->fft_time * 1e-6);
119  if (s->cqt_time)
120  av_log(s->ctx, level, "cqt_time = %16.3f s.\n", s->cqt_time * 1e-6);
121  if (s->process_cqt_time)
122  av_log(s->ctx, level, "process_cqt_time = %16.3f s.\n", s->process_cqt_time * 1e-6);
123  if (s->update_sono_time)
124  av_log(s->ctx, level, "update_sono_time = %16.3f s.\n", s->update_sono_time * 1e-6);
125  if (s->alloc_time)
126  av_log(s->ctx, level, "alloc_time = %16.3f s.\n", s->alloc_time * 1e-6);
127  if (s->bar_time)
128  av_log(s->ctx, level, "bar_time = %16.3f s.\n", s->bar_time * 1e-6);
129  if (s->axis_time)
130  av_log(s->ctx, level, "axis_time = %16.3f s.\n", s->axis_time * 1e-6);
131  if (s->sono_time)
132  av_log(s->ctx, level, "sono_time = %16.3f s.\n", s->sono_time * 1e-6);
133 
134  plot_time = s->fft_time + s->cqt_time + s->process_cqt_time + s->update_sono_time
135  + s->alloc_time + s->bar_time + s->axis_time + s->sono_time;
136  if (plot_time)
137  av_log(s->ctx, level, "plot_time = %16.3f s.\n", plot_time * 1e-6);
138 
139  s->fft_time = s->cqt_time = s->process_cqt_time = s->update_sono_time
140  = s->alloc_time = s->bar_time = s->axis_time = s->sono_time = 0;
141  /* axis_frame may be non reference counted frame */
142  if (s->axis_frame && !s->axis_frame->buf[0]) {
143  av_freep(s->axis_frame->data);
144  for (k = 0; k < 4; k++)
145  s->axis_frame->data[k] = NULL;
146  }
147 
148  av_frame_free(&s->axis_frame);
149  av_frame_free(&s->sono_frame);
150  av_tx_uninit(&s->fft_ctx);
151  if (s->coeffs)
152  for (k = 0; k < s->cqt_len; k++)
153  av_freep(&s->coeffs[k].val);
154  av_freep(&s->coeffs);
155  av_freep(&s->fft_data);
156  av_freep(&s->fft_input);
157  av_freep(&s->fft_result);
158  av_freep(&s->cqt_result);
159  av_freep(&s->attack_data);
160  av_freep(&s->c_buf);
161  av_freep(&s->h_buf);
162  av_freep(&s->rcp_h_buf);
163  av_freep(&s->freq);
164  av_freep(&s->sono_v_buf);
165  av_freep(&s->bar_v_buf);
166 }
167 
168 static double *create_freq_table(double base, double end, int n)
169 {
170  double log_base, log_end;
171  double rcp_n = 1.0 / n;
172  double *freq;
173  int x;
174 
175  freq = av_malloc_array(n, sizeof(*freq));
176  if (!freq)
177  return NULL;
178 
179  log_base = log(base);
180  log_end = log(end);
181  for (x = 0; x < n; x++) {
182  double log_freq = log_base + (x + 0.5) * (log_end - log_base) * rcp_n;
183  freq[x] = exp(log_freq);
184  }
185  return freq;
186 }
187 
188 static double clip_with_log(void *log_ctx, const char *name,
189  double val, double min, double max,
190  double nan_replace, int idx)
191 {
192  int level = AV_LOG_WARNING;
193  if (isnan(val)) {
194  av_log(log_ctx, level, "[%d] %s is nan, setting it to %g.\n",
195  idx, name, nan_replace);
196  val = nan_replace;
197  } else if (val < min) {
198  av_log(log_ctx, level, "[%d] %s is too low (%g), setting it to %g.\n",
199  idx, name, val, min);
200  val = min;
201  } else if (val > max) {
202  av_log(log_ctx, level, "[%d] %s it too high (%g), setting it to %g.\n",
203  idx, name, val, max);
204  val = max;
205  }
206  return val;
207 }
208 
209 static double a_weighting(void *p, double f)
210 {
211  double ret = 12200.0*12200.0 * (f*f*f*f);
212  ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) *
213  sqrt((f*f + 107.7*107.7) * (f*f + 737.9*737.9));
214  return ret;
215 }
216 
217 static double b_weighting(void *p, double f)
218 {
219  double ret = 12200.0*12200.0 * (f*f*f);
220  ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) * sqrt(f*f + 158.5*158.5);
221  return ret;
222 }
223 
/* C-weighting magnitude response (no normalization gain applied),
 * used as a helper function in volume expressions. p is unused. */
static double c_weighting(void *p, double f)
{
    const double num = 12200.0*12200.0 * (f*f);
    const double den = (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0);
    return num / den;
}
230 
232 {
233  const char *func_names[] = { "a_weighting", "b_weighting", "c_weighting", NULL };
234  const char *sono_names[] = { "timeclamp", "tc", "frequency", "freq", "f", "bar_v", NULL };
235  const char *bar_names[] = { "timeclamp", "tc", "frequency", "freq", "f", "sono_v", NULL };
236  double (*funcs[])(void *, double) = { a_weighting, b_weighting, c_weighting };
237  AVExpr *sono = NULL, *bar = NULL;
238  int x, ret = AVERROR(ENOMEM);
239 
240  s->sono_v_buf = av_malloc_array(s->cqt_len, sizeof(*s->sono_v_buf));
241  s->bar_v_buf = av_malloc_array(s->cqt_len, sizeof(*s->bar_v_buf));
242  if (!s->sono_v_buf || !s->bar_v_buf)
243  goto error;
244 
245  if ((ret = av_expr_parse(&sono, s->sono_v, sono_names, func_names, funcs, NULL, NULL, 0, s->ctx)) < 0)
246  goto error;
247 
248  if ((ret = av_expr_parse(&bar, s->bar_v, bar_names, func_names, funcs, NULL, NULL, 0, s->ctx)) < 0)
249  goto error;
250 
251  for (x = 0; x < s->cqt_len; x++) {
252  double vars[] = { s->timeclamp, s->timeclamp, s->freq[x], s->freq[x], s->freq[x], 0.0 };
253  double vol = clip_with_log(s->ctx, "sono_v", av_expr_eval(sono, vars, NULL), 0.0, VOLUME_MAX, 0.0, x);
254  vars[5] = vol;
255  vol = clip_with_log(s->ctx, "bar_v", av_expr_eval(bar, vars, NULL), 0.0, VOLUME_MAX, 0.0, x);
256  s->bar_v_buf[x] = vol * vol;
257  vars[5] = vol;
258  vol = clip_with_log(s->ctx, "sono_v", av_expr_eval(sono, vars, NULL), 0.0, VOLUME_MAX, 0.0, x);
259  s->sono_v_buf[x] = vol * vol;
260  }
261  av_expr_free(sono);
262  av_expr_free(bar);
263  return 0;
264 
265 error:
266  av_freep(&s->sono_v_buf);
267  av_freep(&s->bar_v_buf);
268  av_expr_free(sono);
269  av_expr_free(bar);
270  return ret;
271 }
272 
273 static void cqt_calc(AVComplexFloat *dst, const AVComplexFloat *src, const Coeffs *coeffs,
274  int len, int fft_len)
275 {
276  int k, x, i, j;
277  for (k = 0; k < len; k++) {
278  AVComplexFloat l, r, a = {0,0}, b = {0,0};
279 
280  for (x = 0; x < coeffs[k].len; x++) {
281  float u = coeffs[k].val[x];
282  i = coeffs[k].start + x;
283  j = fft_len - i;
284  a.re += u * src[i].re;
285  a.im += u * src[i].im;
286  b.re += u * src[j].re;
287  b.im += u * src[j].im;
288  }
289 
290  /* separate left and right, (and multiply by 2.0) */
291  l.re = a.re + b.re;
292  l.im = a.im - b.im;
293  r.re = b.im + a.im;
294  r.im = b.re - a.re;
295  dst[k].re = l.re * l.re + l.im * l.im;
296  dst[k].im = r.re * r.re + r.im * r.im;
297  }
298 }
299 
301 {
302  const char *var_names[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };
303  AVExpr *expr = NULL;
304  int rate = s->ctx->inputs[0]->sample_rate;
305  int nb_cqt_coeffs = 0;
306  int k, x, ret;
307 
308  if ((ret = av_expr_parse(&expr, s->tlength, var_names, NULL, NULL, NULL, NULL, 0, s->ctx)) < 0)
309  goto error;
310 
311  ret = AVERROR(ENOMEM);
312  if (!(s->coeffs = av_calloc(s->cqt_len, sizeof(*s->coeffs))))
313  goto error;
314 
315  for (k = 0; k < s->cqt_len; k++) {
316  double vars[] = { s->timeclamp, s->timeclamp, s->freq[k], s->freq[k], s->freq[k] };
317  double flen, center, tlength;
318  int start, end, m = k;
319 
320  if (s->freq[k] > 0.5 * rate)
321  continue;
322  tlength = clip_with_log(s->ctx, "tlength", av_expr_eval(expr, vars, NULL),
323  TLENGTH_MIN, s->timeclamp, s->timeclamp, k);
324 
325  flen = 8.0 * s->fft_len / (tlength * rate);
326  center = s->freq[k] * s->fft_len / rate;
327  start = FFMAX(0, ceil(center - 0.5 * flen));
328  end = FFMIN(s->fft_len, floor(center + 0.5 * flen));
329 
330  s->coeffs[m].start = start & ~(s->cqt_align - 1);
331  s->coeffs[m].len = (end | (s->cqt_align - 1)) + 1 - s->coeffs[m].start;
332  nb_cqt_coeffs += s->coeffs[m].len;
333  if (!(s->coeffs[m].val = av_calloc(s->coeffs[m].len, sizeof(*s->coeffs[m].val))))
334  goto error;
335 
336  for (x = start; x <= end; x++) {
337  int sign = (x & 1) ? (-1) : 1;
338  double y = 2.0 * M_PI * (x - center) * (1.0 / flen);
339  /* nuttall window */
340  double w = 0.355768 + 0.487396 * cos(y) + 0.144232 * cos(2*y) + 0.012604 * cos(3*y);
341  w *= sign * (1.0 / s->fft_len);
342  s->coeffs[m].val[x - s->coeffs[m].start] = w;
343  }
344 
345  if (s->permute_coeffs)
346  s->permute_coeffs(s->coeffs[m].val, s->coeffs[m].len);
347  }
348 
349  av_expr_free(expr);
350  av_log(s->ctx, AV_LOG_VERBOSE, "nb_cqt_coeffs = %d.\n", nb_cqt_coeffs);
351  return 0;
352 
353 error:
354  av_expr_free(expr);
355  if (s->coeffs)
356  for (k = 0; k < s->cqt_len; k++)
357  av_freep(&s->coeffs[k].val);
358  av_freep(&s->coeffs);
359  return ret;
360 }
361 
363 {
364  AVFrame *out;
365  out = av_frame_alloc();
366  if (!out)
367  return NULL;
368  out->format = format;
369  out->width = w;
370  out->height = h;
371  if (av_frame_get_buffer(out, 0) < 0) {
372  av_frame_free(&out);
373  return NULL;
374  }
376  memset(out->data[0], 0, out->linesize[0] * h);
377  } else {
378  int hh = (format == AV_PIX_FMT_YUV420P || format == AV_PIX_FMT_YUVA420P) ? h / 2 : h;
379  memset(out->data[0], 16, out->linesize[0] * h);
380  memset(out->data[1], 128, out->linesize[1] * hh);
381  memset(out->data[2], 128, out->linesize[2] * hh);
382  if (out->data[3])
383  memset(out->data[3], 0, out->linesize[3] * h);
384  }
385  return out;
386 }
387 
389 {
390  switch (format) {
391  case AV_PIX_FMT_RGB24: format = AV_PIX_FMT_RGBA; break;
392  case AV_PIX_FMT_YUV444P:
393  case AV_PIX_FMT_YUV422P:
395  }
396  return format;
397 }
398 
400 {
401  if (!(s->axis_frame = alloc_frame_empty(convert_axis_pixel_format(s->format), s->width, s->axis_h)))
402  return AVERROR(ENOMEM);
403  return 0;
404 }
405 
407 {
408  AVFrame *tmp_frame;
409  int ret = ff_load_image(&tmp_frame, s->axisfile, s->ctx);
410  if (ret < 0)
411  return ret;
412 
413  ret = AVERROR(ENOMEM);
414  if (!(s->axis_frame = av_frame_alloc()))
415  goto error;
416 
417  ret = ff_scale_image(s->axis_frame->data, s->axis_frame->linesize, s->width, s->axis_h,
418  convert_axis_pixel_format(s->format),
419  tmp_frame->data, tmp_frame->linesize, tmp_frame->width, tmp_frame->height,
420  tmp_frame->format, s->ctx);
421  if (ret < 0) {
422  av_frame_free(&s->axis_frame);
423  goto error;
424  }
425 
426  s->axis_frame->width = s->width;
427  s->axis_frame->height = s->axis_h;
428  s->axis_frame->format = convert_axis_pixel_format(s->format);
429 
430  ret = 0;
431 error:
432  av_frame_free(&tmp_frame);
433  return ret;
434 }
435 
436 static double midi(void *p, double f)
437 {
438  return log2(f/440.0) * 12.0 + 69.0;
439 }
440 
/* "r" helper for fontcolor expressions: clamp x to [0,1] and place the
 * resulting 8-bit value in the red byte (bits 16-23) of a packed color. */
static double r_func(void *p, double x)
{
    const double clipped = av_clipd(x, 0.0, 1.0);
    return lrint(clipped * 255.0) << 16;
}
446 
/* "g" helper for fontcolor expressions: clamp x to [0,1] and place the
 * resulting 8-bit value in the green byte (bits 8-15) of a packed color. */
static double g_func(void *p, double x)
{
    const double clipped = av_clipd(x, 0.0, 1.0);
    return lrint(clipped * 255.0) << 8;
}
452 
/* "b" helper for fontcolor expressions: clamp x to [0,1] and return the
 * resulting 8-bit value in the blue byte (bits 0-7) of a packed color. */
static double b_func(void *p, double x)
{
    const double clipped = av_clipd(x, 0.0, 1.0);
    return lrint(clipped * 255.0);
}
458 
460 {
461  const char *var_names[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };
462  const char *func_names[] = { "midi", "r", "g", "b", NULL };
463  double (*funcs[])(void *, double) = { midi, r_func, g_func, b_func };
464  AVExpr *expr = NULL;
465  double *freq = NULL;
466  int x, xs, y, ret;
467  int width = half ? 1920/2 : 1920, height = half ? 16 : 32;
468  int step = half ? 2 : 1;
469 
470  if (s->basefreq != (double) BASEFREQ || s->endfreq != (double) ENDFREQ) {
471  av_log(s->ctx, AV_LOG_WARNING, "font axis rendering is not implemented in non-default frequency range,"
472  " please use axisfile option instead.\n");
473  return AVERROR(EINVAL);
474  }
475 
476  if (s->cqt_len == 1920)
477  freq = s->freq;
478  else if (!(freq = create_freq_table(s->basefreq, s->endfreq, 1920)))
479  return AVERROR(ENOMEM);
480 
481  if ((ret = av_expr_parse(&expr, s->fontcolor, var_names, func_names, funcs, NULL, NULL, 0, s->ctx)) < 0) {
482  if (freq != s->freq)
483  av_freep(&freq);
484  return ret;
485  }
486 
487  for (x = 0, xs = 0; x < width; x++, xs += step) {
488  double vars[] = { s->timeclamp, s->timeclamp, freq[xs], freq[xs], freq[xs] };
489  int color = (int) av_expr_eval(expr, vars, NULL);
490  uint8_t r = (color >> 16) & 0xFF, g = (color >> 8) & 0xFF, b = color & 0xFF;
491  uint8_t *data = tmp->data[0];
492  int linesize = tmp->linesize[0];
493  for (y = 0; y < height; y++) {
494  data[linesize * y + 4 * x] = r;
495  data[linesize * y + 4 * x + 1] = g;
496  data[linesize * y + 4 * x + 2] = b;
497  }
498  }
499 
500  av_expr_free(expr);
501  if (freq != s->freq)
502  av_freep(&freq);
503  return 0;
504 }
505 
/**
 * Render the note-name strip ("EF G A BC D ", one glyph per semitone cell,
 * repeated 10 times across 1920 px) into the alpha channel of tmp (RGBA)
 * using libfreetype.
 *
 * @param s        filter context (used for logging)
 * @param tmp      1920x32 RGBA scratch frame; only the alpha plane is written
 * @param fontfile path of the font file to load
 * @return 0 on success, AVERROR(EINVAL) if fontfile is NULL, the font fails
 *         to load, or freetype support is not compiled in
 */
static int render_freetype(ShowCQTContext *s, AVFrame *tmp, char *fontfile)
{
#if CONFIG_LIBFREETYPE
    const char *str = "EF G A BC D ";
    uint8_t *data = tmp->data[0];
    int linesize = tmp->linesize[0];
    FT_Library lib = NULL;
    FT_Face face = NULL;
    int font_width = 16, font_height = 32;
    int font_repeat = font_width * 12;          /* one octave = 12 cells */
    int linear_hori_advance = font_width * 65536; /* target advance, 16.16 fixed point */
    int non_monospace_warning = 0;
    int x;

    if (!fontfile)
        return AVERROR(EINVAL);

    if (FT_Init_FreeType(&lib))
        goto fail;

    if (FT_New_Face(lib, fontfile, 0, &face))
        goto fail;

    if (FT_Set_Char_Size(face, 16*64, 0, 0, 0))
        goto fail;

    /* render 'A' once to measure the face, then rescale so the glyph
     * advance matches the 16-px cell (assumes 'A' is representative of
     * the face's advance width) */
    if (FT_Load_Char(face, 'A', FT_LOAD_RENDER))
        goto fail;

    if (FT_Set_Char_Size(face, 16*64 * linear_hori_advance / face->glyph->linearHoriAdvance, 0, 0, 0))
        goto fail;

    for (x = 0; x < 12; x++) {
        int sx, sy, rx, bx, by, dx, dy;

        /* spaces in str are empty cells (sharps/flats) */
        if (str[x] == ' ')
            continue;

        if (FT_Load_Char(face, str[x], FT_LOAD_RENDER))
            goto fail;

        if (face->glyph->advance.x != font_width*64 && !non_monospace_warning) {
            av_log(s->ctx, AV_LOG_WARNING, "font is not monospace.\n");
            non_monospace_warning = 1;
        }

        /* baseline 8 px above the bottom of the 32-px strip */
        sy = font_height - 8 - face->glyph->bitmap_top;
        for (rx = 0; rx < 10; rx++) {
            /* stamp the same glyph once per octave */
            sx = rx * font_repeat + x * font_width + face->glyph->bitmap_left;
            for (by = 0; by < face->glyph->bitmap.rows; by++) {
                dy = by + sy;
                if (dy < 0)
                    continue;
                if (dy >= font_height)
                    break;

                for (bx = 0; bx < face->glyph->bitmap.width; bx++) {
                    dx = bx + sx;
                    if (dx < 0)
                        continue;
                    if (dx >= 1920)
                        break;
                    /* copy glyph coverage into the alpha byte (RGBA +3) */
                    data[dy*linesize+4*dx+3] = face->glyph->bitmap.buffer[by*face->glyph->bitmap.width+bx];
                }
            }
        }
    }

    FT_Done_Face(face);
    FT_Done_FreeType(lib);
    return 0;

fail:
    av_log(s->ctx, AV_LOG_WARNING, "error while loading freetype font.\n");
    FT_Done_Face(face);
    FT_Done_FreeType(lib);
    return AVERROR(EINVAL);
#else
    if (fontfile)
        av_log(s->ctx, AV_LOG_WARNING, "freetype is not available, ignoring fontfile option.\n");
    return AVERROR(EINVAL);
#endif
}
589 
/**
 * Resolve a fontconfig pattern to a font file and delegate rendering to
 * render_freetype().
 *
 * @param s    filter context (used for logging)
 * @param tmp  RGBA scratch frame passed through to render_freetype()
 * @param font fontconfig pattern; '|' characters are rewritten to ':' in
 *             place (mutates the string) so patterns can be given without
 *             shell-unfriendly colons
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int render_fontconfig(ShowCQTContext *s, AVFrame *tmp, char* font)
{
#if CONFIG_LIBFONTCONFIG
    FcConfig *fontconfig;
    FcPattern *pat, *best;
    FcResult result = FcResultMatch;
    char *filename;
    int i, ret;

    if (!font)
        return AVERROR(EINVAL);

    /* allow '|' as a separator in place of fontconfig's ':' */
    for (i = 0; font[i]; i++) {
        if (font[i] == '|')
            font[i] = ':';
    }

    if (!(fontconfig = FcInitLoadConfigAndFonts())) {
        av_log(s->ctx, AV_LOG_ERROR, "impossible to init fontconfig.\n");
        return AVERROR_UNKNOWN;
    }

    if (!(pat = FcNameParse((uint8_t *)font))) {
        av_log(s->ctx, AV_LOG_ERROR, "could not parse fontconfig pat.\n");
        FcConfigDestroy(fontconfig);
        return AVERROR(EINVAL);
    }

    FcDefaultSubstitute(pat);

    if (!FcConfigSubstitute(fontconfig, pat, FcMatchPattern)) {
        av_log(s->ctx, AV_LOG_ERROR, "could not substitute fontconfig options.\n");
        FcPatternDestroy(pat);
        FcConfigDestroy(fontconfig);
        return AVERROR(ENOMEM);
    }

    best = FcFontMatch(fontconfig, pat, &result);
    FcPatternDestroy(pat);

    ret = AVERROR(EINVAL);
    if (!best || result != FcResultMatch) {
        av_log(s->ctx, AV_LOG_ERROR, "cannot find a valid font for %s.\n", font);
        /* NOTE(review): best may be NULL here; fail path calls
         * FcPatternDestroy(best) — verify FcPatternDestroy tolerates NULL */
        goto fail;
    }

    if (FcPatternGetString(best, FC_FILE, 0, (FcChar8 **)&filename) != FcResultMatch) {
        av_log(s->ctx, AV_LOG_ERROR, "no file path for %s\n", font);
        goto fail;
    }

    ret = render_freetype(s, tmp, filename);

fail:
    FcPatternDestroy(best);
    FcConfigDestroy(fontconfig);
    return ret;
#else
    if (font)
        av_log(s->ctx, AV_LOG_WARNING, "fontconfig is not available, ignoring font option.\n");
    return AVERROR(EINVAL);
#endif
}
653 
655 {
656  const char *str = "EF G A BC D ";
657  const uint8_t *vga16_font = avpriv_vga16_font_get();
658  int x, u, v, mask;
659  uint8_t *data = tmp->data[0];
660  int linesize = tmp->linesize[0];
661  int width = 1920/2, height = 16;
662 
663  for (x = 0; x < width; x += width/10) {
664  uint8_t *startptr = data + 4 * x;
665  for (u = 0; u < 12; u++) {
666  for (v = 0; v < height; v++) {
667  uint8_t *p = startptr + v * linesize + height/2 * 4 * u;
668  for (mask = 0x80; mask; mask >>= 1, p += 4) {
669  if (mask & vga16_font[str[u] * 16 + v])
670  p[3] = 255;
671  else
672  p[3] = 0;
673  }
674  }
675  }
676  }
677 
678  return 0;
679 }
680 
682 {
683  AVFrame *tmp = NULL;
684  int ret = AVERROR(ENOMEM);
685  int width = 1920, height = 32;
686  int default_font = 0;
687 
689  goto fail;
690 
691  if (!(s->axis_frame = av_frame_alloc()))
692  goto fail;
693 
694  if (render_freetype(s, tmp, s->fontfile) < 0 &&
695  render_fontconfig(s, tmp, s->font) < 0 &&
696  (default_font = 1, ret = render_default_font(tmp)) < 0)
697  goto fail;
698 
699  if (default_font)
700  width /= 2, height /= 2;
701 
702  if ((ret = init_axis_color(s, tmp, default_font)) < 0)
703  goto fail;
704 
705  if ((ret = ff_scale_image(s->axis_frame->data, s->axis_frame->linesize, s->width, s->axis_h,
706  convert_axis_pixel_format(s->format), tmp->data, tmp->linesize,
707  width, height, AV_PIX_FMT_RGBA, s->ctx)) < 0)
708  goto fail;
709 
710  av_frame_free(&tmp);
711  s->axis_frame->width = s->width;
712  s->axis_frame->height = s->axis_h;
713  s->axis_frame->format = convert_axis_pixel_format(s->format);
714  return 0;
715 
716 fail:
717  av_frame_free(&tmp);
718  av_frame_free(&s->axis_frame);
719  return ret;
720 }
721 
722 static float calculate_gamma(float v, float g)
723 {
724  if (g == 1.0f)
725  return v;
726  if (g == 2.0f)
727  return sqrtf(v);
728  if (g == 3.0f)
729  return cbrtf(v);
730  if (g == 4.0f)
731  return sqrtf(sqrtf(v));
732  return expf(logf(v) / g);
733 }
734 
735 static void rgb_from_cqt(ColorFloat *c, const AVComplexFloat *v, float g, int len, float cscheme[6])
736 {
737  int x;
738  for (x = 0; x < len; x++) {
739  c[x].rgb.r = 255.0f * calculate_gamma(FFMIN(1.0f, cscheme[0] * v[x].re + cscheme[3] * v[x].im), g);
740  c[x].rgb.g = 255.0f * calculate_gamma(FFMIN(1.0f, cscheme[1] * v[x].re + cscheme[4] * v[x].im), g);
741  c[x].rgb.b = 255.0f * calculate_gamma(FFMIN(1.0f, cscheme[2] * v[x].re + cscheme[5] * v[x].im), g);
742  }
743 }
744 
745 static void yuv_from_cqt(ColorFloat *c, const AVComplexFloat *v, float gamma, int len, float cm[3][3], float cscheme[6])
746 {
747  int x;
748  for (x = 0; x < len; x++) {
749  float r, g, b;
750  r = calculate_gamma(FFMIN(1.0f, cscheme[0] * v[x].re + cscheme[3] * v[x].im), gamma);
751  g = calculate_gamma(FFMIN(1.0f, cscheme[1] * v[x].re + cscheme[4] * v[x].im), gamma);
752  b = calculate_gamma(FFMIN(1.0f, cscheme[2] * v[x].re + cscheme[5] * v[x].im), gamma);
753  c[x].yuv.y = cm[0][0] * r + cm[0][1] * g + cm[0][2] * b;
754  c[x].yuv.u = cm[1][0] * r + cm[1][1] * g + cm[1][2] * b;
755  c[x].yuv.v = cm[2][0] * r + cm[2][1] * g + cm[2][2] * b;
756  }
757 }
758 
759 static void draw_bar_rgb(AVFrame *out, const float *h, const float *rcp_h,
760  const ColorFloat *c, int bar_h, float bar_t)
761 {
762  int x, y, w = out->width;
763  float mul, ht, rcp_bar_h = 1.0f / bar_h, rcp_bar_t = 1.0f / bar_t;
764  uint8_t *v = out->data[0], *lp;
765  int ls = out->linesize[0];
766 
767  for (y = 0; y < bar_h; y++) {
768  ht = (bar_h - y) * rcp_bar_h;
769  lp = v + y * ls;
770  for (x = 0; x < w; x++) {
771  if (h[x] <= ht) {
772  *lp++ = 0;
773  *lp++ = 0;
774  *lp++ = 0;
775  } else {
776  mul = (h[x] - ht) * rcp_h[x];
777  mul = (mul < bar_t) ? (mul * rcp_bar_t) : 1.0f;
778  *lp++ = lrintf(mul * c[x].rgb.r);
779  *lp++ = lrintf(mul * c[x].rgb.g);
780  *lp++ = lrintf(mul * c[x].rgb.b);
781  }
782  }
783  }
784 }
785 
/* Emit one bargraph pixel at column x for planar YUV where this pixel has
 * its own chroma sample: black (Y=16, U=V=128) above the bar, otherwise the
 * bar color scaled by the bar_t transparency ramp. Advances lpy/lpu/lpv.
 * Relies on h, ht, rcp_h, mul, bar_t, rcp_bar_t and c from the caller. */
#define DRAW_BAR_WITH_CHROMA(x) \
do { \
    if (h[x] <= ht) { \
        *lpy++ = 16; \
        *lpu++ = 128; \
        *lpv++ = 128; \
    } else { \
        mul = (h[x] - ht) * rcp_h[x]; \
        mul = (mul < bar_t) ? (mul * rcp_bar_t) : 1.0f; \
        *lpy++ = lrintf(mul * c[x].yuv.y + 16.0f); \
        *lpu++ = lrintf(mul * c[x].yuv.u + 128.0f); \
        *lpv++ = lrintf(mul * c[x].yuv.v + 128.0f); \
    } \
} while (0)

/* Same as DRAW_BAR_WITH_CHROMA but for a pixel whose chroma is shared with
 * a neighbor (subsampled formats): only the luma sample is written. */
#define DRAW_BAR_WITHOUT_CHROMA(x) \
do { \
    if (h[x] <= ht) { \
        *lpy++ = 16; \
    } else { \
        mul = (h[x] - ht) * rcp_h[x]; \
        mul = (mul < bar_t) ? (mul * rcp_bar_t) : 1.0f; \
        *lpy++ = lrintf(mul * c[x].yuv.y + 16.0f); \
    } \
} while (0)
811 
812 static void draw_bar_yuv(AVFrame *out, const float *h, const float *rcp_h,
813  const ColorFloat *c, int bar_h, float bar_t)
814 {
815  int x, y, yh, w = out->width;
816  float mul, ht, rcp_bar_h = 1.0f / bar_h, rcp_bar_t = 1.0f / bar_t;
817  uint8_t *vy = out->data[0], *vu = out->data[1], *vv = out->data[2];
818  uint8_t *lpy, *lpu, *lpv;
819  int lsy = out->linesize[0], lsu = out->linesize[1], lsv = out->linesize[2];
820  int fmt = out->format;
821 
822  for (y = 0; y < bar_h; y += 2) {
823  yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y;
824  ht = (bar_h - y) * rcp_bar_h;
825  lpy = vy + y * lsy;
826  lpu = vu + yh * lsu;
827  lpv = vv + yh * lsv;
828  if (fmt == AV_PIX_FMT_YUV444P) {
829  for (x = 0; x < w; x += 2) {
832  }
833  } else {
834  for (x = 0; x < w; x += 2) {
837  }
838  }
839 
840  ht = (bar_h - (y+1)) * rcp_bar_h;
841  lpy = vy + (y+1) * lsy;
842  lpu = vu + (y+1) * lsu;
843  lpv = vv + (y+1) * lsv;
844  if (fmt == AV_PIX_FMT_YUV444P) {
845  for (x = 0; x < w; x += 2) {
848  }
849  } else if (fmt == AV_PIX_FMT_YUV422P) {
850  for (x = 0; x < w; x += 2) {
853  }
854  } else {
855  for (x = 0; x < w; x += 2) {
858  }
859  }
860  }
861 }
862 
863 static void draw_axis_rgb(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off)
864 {
865  int x, y, w = axis->width, h = axis->height;
866  float a, rcp_255 = 1.0f / 255.0f;
867  uint8_t *lp, *lpa;
868 
869  for (y = 0; y < h; y++) {
870  lp = out->data[0] + (off + y) * out->linesize[0];
871  lpa = axis->data[0] + y * axis->linesize[0];
872  for (x = 0; x < w; x++) {
873  if (!lpa[3]) {
874  *lp++ = lrintf(c[x].rgb.r);
875  *lp++ = lrintf(c[x].rgb.g);
876  *lp++ = lrintf(c[x].rgb.b);
877  } else if (lpa[3] == 255) {
878  *lp++ = lpa[0];
879  *lp++ = lpa[1];
880  *lp++ = lpa[2];
881  } else {
882  a = rcp_255 * lpa[3];
883  *lp++ = lrintf(a * lpa[0] + (1.0f - a) * c[x].rgb.r);
884  *lp++ = lrintf(a * lpa[1] + (1.0f - a) * c[x].rgb.g);
885  *lp++ = lrintf(a * lpa[2] + (1.0f - a) * c[x].rgb.b);
886  }
887  lpa += 4;
888  }
889  }
890 }
891 
/* Blend one axis pixel over bar color c for planar YUV where the pixel has
 * its own chroma sample. Axis planes: lpay/lpau/lpav (YUV), lpaa (alpha).
 * Output: lpy/lpu/lpv. All seven pointers are advanced by one sample. */
#define BLEND_WITH_CHROMA(c) \
do { \
    if (!*lpaa) { \
        *lpy = lrintf(c.yuv.y + 16.0f); \
        *lpu = lrintf(c.yuv.u + 128.0f); \
        *lpv = lrintf(c.yuv.v + 128.0f); \
    } else if (255 == *lpaa) { \
        *lpy = *lpay; \
        *lpu = *lpau; \
        *lpv = *lpav; \
    } else { \
        float a = (1.0f/255.0f) * (*lpaa); \
        *lpy = lrintf(a * (*lpay) + (1.0f - a) * (c.yuv.y + 16.0f)); \
        *lpu = lrintf(a * (*lpau) + (1.0f - a) * (c.yuv.u + 128.0f)); \
        *lpv = lrintf(a * (*lpav) + (1.0f - a) * (c.yuv.v + 128.0f)); \
    } \
    lpy++; lpu++; lpv++; \
    lpay++; lpau++; lpav++; lpaa++; \
} while (0)

/* Luma-only variant for chroma-subsampled pixels; alpha_inc controls
 * whether lpaa advances here (1) or is left for the chroma macro (0). */
#define BLEND_WITHOUT_CHROMA(c, alpha_inc) \
do { \
    if (!*lpaa) { \
        *lpy = lrintf(c.yuv.y + 16.0f); \
    } else if (255 == *lpaa) { \
        *lpy = *lpay; \
    } else { \
        float a = (1.0f/255.0f) * (*lpaa); \
        *lpy = lrintf(a * (*lpay) + (1.0f - a) * (c.yuv.y + 16.0f)); \
    } \
    lpy++; \
    lpay++; lpaa += alpha_inc; \
} while (0)

/* Blend one chroma sample shared by two horizontally adjacent axis pixels
 * (4:2:2): each neighbor contributes half its alpha-weighted chroma. */
#define BLEND_CHROMA2(c) \
do { \
    if (!lpaa[0] && !lpaa[1]) { \
        *lpu = lrintf(c.yuv.u + 128.0f); \
        *lpv = lrintf(c.yuv.v + 128.0f); \
    } else if (255 == lpaa[0] && 255 == lpaa[1]) { \
        *lpu = *lpau; *lpv = *lpav; \
    } else { \
        float a0 = (0.5f/255.0f) * lpaa[0]; \
        float a1 = (0.5f/255.0f) * lpaa[1]; \
        float b = 1.0f - a0 - a1; \
        *lpu = lrintf(a0 * lpau[0] + a1 * lpau[1] + b * (c.yuv.u + 128.0f)); \
        *lpv = lrintf(a0 * lpav[0] + a1 * lpav[1] + b * (c.yuv.v + 128.0f)); \
    } \
    lpau += 2; lpav += 2; lpaa++; lpu++; lpv++; \
} while (0)

/* Blend one chroma sample shared by a 2x2 block of axis pixels (4:2:0):
 * each of the four neighbors contributes a quarter of its alpha weight.
 * lsau/lsav/lsaa are the axis plane linesizes used to reach the next row. */
#define BLEND_CHROMA2x2(c) \
do { \
    if (!lpaa[0] && !lpaa[1] && !lpaa[lsaa] && !lpaa[lsaa+1]) { \
        *lpu = lrintf(c.yuv.u + 128.0f); \
        *lpv = lrintf(c.yuv.v + 128.0f); \
    } else if (255 == lpaa[0] && 255 == lpaa[1] && \
               255 == lpaa[lsaa] && 255 == lpaa[lsaa+1]) { \
        *lpu = *lpau; *lpv = *lpav; \
    } else { \
        float a0 = (0.25f/255.0f) * lpaa[0]; \
        float a1 = (0.25f/255.0f) * lpaa[1]; \
        float a2 = (0.25f/255.0f) * lpaa[lsaa]; \
        float a3 = (0.25f/255.0f) * lpaa[lsaa+1]; \
        float b = 1.0f - a0 - a1 - a2 - a3; \
        *lpu = lrintf(a0 * lpau[0] + a1 * lpau[1] + a2 * lpau[lsau] + a3 * lpau[lsau+1] \
             + b * (c.yuv.u + 128.0f)); \
        *lpv = lrintf(a0 * lpav[0] + a1 * lpav[1] + a2 * lpav[lsav] + a3 * lpav[lsav+1] \
             + b * (c.yuv.v + 128.0f)); \
    } \
    lpau += 2; lpav += 2; lpaa++; lpu++; lpv++; \
} while (0)
964 
/* Composite the pre-rendered RGBA-planar axis image over the per-column bar
 * colors c[] and write the result into the YUV output frame starting at row
 * `off`. Rows are processed in pairs (heights are kept even at config time)
 * because YUV420P shares one chroma row per two luma rows. The axis frame is
 * planar with a separate alpha plane in axis->data[3]. */
 965 static void draw_axis_yuv(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off)
 966 {
 967  int fmt = out->format, x, y, yh, w = axis->width, h = axis->height;
 968  int offh = (fmt == AV_PIX_FMT_YUV420P) ? off / 2 : off;
 969  uint8_t *vy = out->data[0], *vu = out->data[1], *vv = out->data[2];
 970  uint8_t *vay = axis->data[0], *vau = axis->data[1], *vav = axis->data[2], *vaa = axis->data[3];
 971  int lsy = out->linesize[0], lsu = out->linesize[1], lsv = out->linesize[2];
 972  int lsay = axis->linesize[0], lsau = axis->linesize[1], lsav = axis->linesize[2], lsaa = axis->linesize[3];
 973  uint8_t *lpy, *lpu, *lpv, *lpay, *lpau, *lpav, *lpaa;
 974 
 975  for (y = 0; y < h; y += 2) {
 976  yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y;
 977  lpy = vy + (off + y) * lsy;
 978  lpu = vu + (offh + yh) * lsu;
 979  lpv = vv + (offh + yh) * lsv;
 980  lpay = vay + y * lsay;
 981  lpau = vau + y * lsau;
 982  lpav = vav + y * lsav;
 983  lpaa = vaa + y * lsaa;
 /* first row of the pair: 444 blends full chroma per pixel, 422 averages
  * chroma pairs, 420 averages 2x2 chroma blocks (consuming the next row's
  * alpha/chroma via lsaa/lsau/lsav offsets inside BLEND_CHROMA2x2) */
 984  if (fmt == AV_PIX_FMT_YUV444P) {
 985  for (x = 0; x < w; x += 2) {
 986  BLEND_WITH_CHROMA(c[x]);
 987  BLEND_WITH_CHROMA(c[x+1]);
 988  }
 989  } else if (fmt == AV_PIX_FMT_YUV422P) {
 990  for (x = 0; x < w; x += 2) {
 991  BLEND_WITHOUT_CHROMA(c[x], 0);
 992  BLEND_CHROMA2(c[x]);
 993  BLEND_WITHOUT_CHROMA(c[x+1], 1);
 994  }
 995  } else {
 996  for (x = 0; x < w; x += 2) {
 997  BLEND_WITHOUT_CHROMA(c[x], 0);
 998  BLEND_CHROMA2x2(c[x]);
 999  BLEND_WITHOUT_CHROMA(c[x+1], 1);
 1000  }
 1001  }
 1002 
 /* second row of the pair; note lpu/lpv use `off` (not offh) here — for
  * 444/422 they are equal, and for 420 this row's chroma pointers are never
  * dereferenced (luma-only blending below), so the value is irrelevant */
 1003  lpy = vy + (off + y + 1) * lsy;
 1004  lpu = vu + (off + y + 1) * lsu;
 1005  lpv = vv + (off + y + 1) * lsv;
 1006  lpay = vay + (y + 1) * lsay;
 1007  lpau = vau + (y + 1) * lsau;
 1008  lpav = vav + (y + 1) * lsav;
 1009  lpaa = vaa + (y + 1) * lsaa;
 1010  if (fmt == AV_PIX_FMT_YUV444P) {
 1011  for (x = 0; x < w; x += 2) {
 1012  BLEND_WITH_CHROMA(c[x]);
 1013  BLEND_WITH_CHROMA(c[x+1]);
 1014  }
 1015  } else if (fmt == AV_PIX_FMT_YUV422P) {
 1016  for (x = 0; x < w; x += 2) {
 1017  BLEND_WITHOUT_CHROMA(c[x], 0);
 1018  BLEND_CHROMA2(c[x]);
 1019  BLEND_WITHOUT_CHROMA(c[x+1], 1);
 1020  }
 1021  } else {
 /* 420: chroma for this row was already written by BLEND_CHROMA2x2 above */
 1022  for (x = 0; x < w; x += 2) {
 1023  BLEND_WITHOUT_CHROMA(c[x], 1);
 1024  BLEND_WITHOUT_CHROMA(c[x+1], 1);
 1025  }
 1026  }
 1027  }
 1028 }
1029 
1030 static void draw_sono(AVFrame *out, AVFrame *sono, int off, int idx)
1031 {
1032  int fmt = out->format, h = sono->height;
1033  int nb_planes = (fmt == AV_PIX_FMT_RGB24) ? 1 : 3;
1034  int offh = (fmt == AV_PIX_FMT_YUV420P) ? off / 2 : off;
1035  int inc = (fmt == AV_PIX_FMT_YUV420P) ? 2 : 1;
1036  ptrdiff_t ls;
1037  int i, y, yh;
1038 
1039  ls = FFABS(FFMIN(out->linesize[0], sono->linesize[0]));
1040  for (y = 0; y < h; y++) {
1041  memcpy(out->data[0] + (off + y) * out->linesize[0],
1042  sono->data[0] + (idx + y) % h * sono->linesize[0], ls);
1043  }
1044 
1045  for (i = 1; i < nb_planes; i++) {
1046  ls = FFABS(FFMIN(out->linesize[i], sono->linesize[i]));
1047  for (y = 0; y < h; y += inc) {
1048  yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y;
1049  memcpy(out->data[i] + (offh + yh) * out->linesize[i],
1050  sono->data[i] + (idx + y) % h * sono->linesize[i], ls);
1051  }
1052  }
1053 }
1054 
1055 static void update_sono_rgb(AVFrame *sono, const ColorFloat *c, int idx)
1056 {
1057  int x, w = sono->width;
1058  uint8_t *lp = sono->data[0] + idx * sono->linesize[0];
1059 
1060  for (x = 0; x < w; x++) {
1061  *lp++ = lrintf(c[x].rgb.r);
1062  *lp++ = lrintf(c[x].rgb.g);
1063  *lp++ = lrintf(c[x].rgb.b);
1064  }
1065 }
1066 
1067 static void update_sono_yuv(AVFrame *sono, const ColorFloat *c, int idx)
1068 {
1069  int x, fmt = sono->format, w = sono->width;
1070  uint8_t *lpy = sono->data[0] + idx * sono->linesize[0];
1071  uint8_t *lpu = sono->data[1] + idx * sono->linesize[1];
1072  uint8_t *lpv = sono->data[2] + idx * sono->linesize[2];
1073 
1074  for (x = 0; x < w; x += 2) {
1075  *lpy++ = lrintf(c[x].yuv.y + 16.0f);
1076  *lpu++ = lrintf(c[x].yuv.u + 128.0f);
1077  *lpv++ = lrintf(c[x].yuv.v + 128.0f);
1078  *lpy++ = lrintf(c[x+1].yuv.y + 16.0f);
1079  if (fmt == AV_PIX_FMT_YUV444P) {
1080  *lpu++ = lrintf(c[x+1].yuv.u + 128.0f);
1081  *lpv++ = lrintf(c[x+1].yuv.v + 128.0f);
1082  }
1083  }
1084 }
1085 
/* NOTE(review): the signature line was lost in extraction; per the symbol
 * index this is `static void process_cqt(ShowCQTContext *s)`.
 * Post-processes the raw CQT result: on the first sub-frame of a video frame
 * it derives the bar heights, then applies the sonogram volume, downmixes
 * oversampled bins (fcount > 1) to one bin per output column, and converts
 * to per-column colors. */
 1087 {
 1088  int x, i;
 /* bar heights are computed only once per output frame (sono_count == 0),
  * before sono_v_buf scaling is applied to cqt_result below */
 1089  if (!s->sono_count) {
 1090  for (x = 0; x < s->cqt_len; x++) {
 1091  s->h_buf[x] = s->bar_v_buf[x] * 0.5f * (s->cqt_result[x].re + s->cqt_result[x].im);
 1092  }
 /* average fcount adjacent oversampled bins into one height per column */
 1093  if (s->fcount > 1) {
 1094  float rcp_fcount = 1.0f / s->fcount;
 1095  for (x = 0; x < s->width; x++) {
 1096  float h = 0.0f;
 1097  for (i = 0; i < s->fcount; i++)
 1098  h += s->h_buf[s->fcount * x + i];
 1099  s->h_buf[x] = rcp_fcount * h;
 1100  }
 1101  }
 /* gamma-correct heights; +0.0001f guards the reciprocal against div-by-0 */
 1102  for (x = 0; x < s->width; x++) {
 1103  s->h_buf[x] = calculate_gamma(s->h_buf[x], s->bar_g);
 1104  s->rcp_h_buf[x] = 1.0f / (s->h_buf[x] + 0.0001f);
 1105  }
 1106  }
 1107 
 /* apply per-bin sonogram volume */
 1108  for (x = 0; x < s->cqt_len; x++) {
 1109  s->cqt_result[x].re *= s->sono_v_buf[x];
 1110  s->cqt_result[x].im *= s->sono_v_buf[x];
 1111  }
 1112 
 /* downmix oversampled bins for the sonogram/color path as well */
 1113  if (s->fcount > 1) {
 1114  float rcp_fcount = 1.0f / s->fcount;
 1115  for (x = 0; x < s->width; x++) {
 1116  AVComplexFloat result = {0.0f, 0.0f};
 1117  for (i = 0; i < s->fcount; i++) {
 1118  result.re += s->cqt_result[s->fcount * x + i].re;
 1119  result.im += s->cqt_result[s->fcount * x + i].im;
 1120  }
 1121  s->cqt_result[x].re = rcp_fcount * result.re;
 1122  s->cqt_result[x].im = rcp_fcount * result.im;
 1123  }
 1124  }
 1125 
 /* convert the per-column spectrum to colors in the output pixel space */
 1126  if (s->format == AV_PIX_FMT_RGB24)
 1127  rgb_from_cqt(s->c_buf, s->cqt_result, s->sono_g, s->width, s->cscheme_v);
 1128  else
 1129  yuv_from_cqt(s->c_buf, s->cqt_result, s->sono_g, s->width, s->cmatrix, s->cscheme_v);
 1130 }
1131 
/* Run one analysis step (FFT -> CQT -> colors) and, on the first sub-frame of
 * each output frame, allocate and draw a complete video frame into *frameout.
 * *frameout is left untouched on sub-frames that only update the sonogram.
 * Per-stage wall-clock time is accumulated into the s->*_time counters.
 * Returns 0 on success or AVERROR(ENOMEM). */
 1132 static int plot_cqt(AVFilterContext *ctx, AVFrame **frameout)
 1133 {
 1134  AVFilterLink *outlink = ctx->outputs[0];
 1135  ShowCQTContext *s = ctx->priv;
 1136  int64_t last_time, cur_time;
 1137 
 /* accumulate elapsed time since the previous checkpoint into counter t */
 1138 #define UPDATE_TIME(t) \
 1139  cur_time = av_gettime_relative(); \
 1140  t += cur_time - last_time; \
 1141  last_time = cur_time
 1142 
 1143  last_time = av_gettime_relative();
 1144 
 1145  memcpy(s->fft_input, s->fft_data, s->fft_len * sizeof(*s->fft_data));
 /* apply the attack window to the newest (not-yet-heard) samples so the
  * display does not anticipate sound */
 1146  if (s->attack_data) {
 1147  int k;
 1148  for (k = 0; k < s->remaining_fill_max; k++) {
 1149  s->fft_input[s->fft_len/2+k].re *= s->attack_data[k];
 1150  s->fft_input[s->fft_len/2+k].im *= s->attack_data[k];
 1151  }
 1152  }
 1153 
 1154  s->tx_fn(s->fft_ctx, s->fft_result, s->fft_input, sizeof(AVComplexFloat));
 1155  UPDATE_TIME(s->fft_time);
 1156 
 1157  s->cqt_calc(s->cqt_result, s->fft_result, s->coeffs, s->cqt_len, s->fft_len);
 1158  UPDATE_TIME(s->cqt_time);
 1159 
 1160  process_cqt(s);
 1161  UPDATE_TIME(s->process_cqt_time);
 1162 
 1163  if (s->sono_h) {
 1164  s->update_sono(s->sono_frame, s->c_buf, s->sono_idx);
 1165  UPDATE_TIME(s->update_sono_time);
 1166  }
 1167 
 /* emit a video frame only every s->count analysis steps */
 1168  if (!s->sono_count) {
 1169  AVFrame *out = *frameout = ff_get_video_buffer(outlink, outlink->w, outlink->h);
 1170  if (!out)
 1171  return AVERROR(ENOMEM);
 1172  out->sample_aspect_ratio = av_make_q(1, 1);
 1173  out->color_range = AVCOL_RANGE_MPEG;
 1174  out->colorspace = s->csp;
 1175  UPDATE_TIME(s->alloc_time);
 1176 
 1177  if (s->bar_h) {
 1178  s->draw_bar(out, s->h_buf, s->rcp_h_buf, s->c_buf, s->bar_h, s->bar_t);
 1179  UPDATE_TIME(s->bar_time);
 1180  }
 1181 
 1182  if (s->axis_h) {
 1183  s->draw_axis(out, s->axis_frame, s->c_buf, s->bar_h);
 1184  UPDATE_TIME(s->axis_time);
 1185  }
 1186 
 1187  if (s->sono_h) {
 1188  s->draw_sono(out, s->sono_frame, s->bar_h + s->axis_h, s->sono_idx);
 1189  UPDATE_TIME(s->sono_time);
 1190  }
 1191  }
 /* advance sub-frame counter and rotate the sonogram ring buffer upward */
 1192  s->sono_count = (s->sono_count + 1) % s->count;
 1193  if (s->sono_h)
 1194  s->sono_idx = (s->sono_idx + s->sono_h - 1) % s->sono_h;
 1195  return 0;
 1196 }
1197 
/* NOTE(review): the signature line was lost in extraction; per the symbol
 * index this is `static void init_colormatrix(ShowCQTContext *s)`.
 * Builds the 3x3 limited-range RGB->YUV matrix from the kr/kb luma
 * coefficients of the selected colorspace (219 luma / 112 chroma scaling). */
 1199 {
 1200  double kr, kg, kb;
 1201 
 1202  /* from vf_colorspace.c */
 1203  switch (s->csp) {
 1204  default:
 1205  av_log(s->ctx, AV_LOG_WARNING, "unsupported colorspace, setting it to unspecified.\n");
 1206  s->csp = AVCOL_SPC_UNSPECIFIED;
 /* NOTE(review): an `av_fallthrough` annotation (original line 1207) was
  * dropped by extraction here — the fall-through into the BT.601 case is
  * intentional */
 1208  case AVCOL_SPC_UNSPECIFIED:
 1209  case AVCOL_SPC_BT470BG:
 1210  case AVCOL_SPC_SMPTE170M:
 1211  kr = 0.299; kb = 0.114; break;
 1212  case AVCOL_SPC_BT709:
 1213  kr = 0.2126; kb = 0.0722; break;
 1214  case AVCOL_SPC_FCC:
 1215  kr = 0.30; kb = 0.11; break;
 1216  case AVCOL_SPC_SMPTE240M:
 1217  kr = 0.212; kb = 0.087; break;
 1218  case AVCOL_SPC_BT2020_NCL:
 1219  kr = 0.2627; kb = 0.0593; break;
 1220  }
 1221 
 /* kg follows from kr + kg + kb == 1 */
 1222  kg = 1.0 - kr - kb;
 1223  s->cmatrix[0][0] = 219.0 * kr;
 1224  s->cmatrix[0][1] = 219.0 * kg;
 1225  s->cmatrix[0][2] = 219.0 * kb;
 1226  s->cmatrix[1][0] = -112.0 * kr / (1.0 - kb);
 1227  s->cmatrix[1][1] = -112.0 * kg / (1.0 - kb);
 1228  s->cmatrix[1][2] = 112.0;
 1229  s->cmatrix[2][0] = 112.0;
 1230  s->cmatrix[2][1] = -112.0 * kg / (1.0 - kr);
 1231  s->cmatrix[2][2] = -112.0 * kb / (1.0 - kr);
 1232 }
1233 
/* NOTE(review): the signature line was lost in extraction; per the symbol
 * index this is `static int init_cscheme(ShowCQTContext *s)`.
 * Parses the "r|g|b|r|g|b"-style cscheme option string into s->cscheme_v[6]
 * and validates every coefficient is a finite value in [0, 1].
 * Returns 0 on success, AVERROR(EINVAL) on malformed input. */
 1235 {
 1236  char tail[2];
 1237  int k;
 1238 
 /* the trailing " %1s" must NOT match: any non-space garbage after the six
  * floats makes sscanf return 7, which fails the != 6 check */
 1239  if (sscanf(s->cscheme, " %f | %f | %f | %f | %f | %f %1s", &s->cscheme_v[0],
 1240  &s->cscheme_v[1], &s->cscheme_v[2], &s->cscheme_v[3], &s->cscheme_v[4],
 1241  &s->cscheme_v[5], tail) != 6)
 1242  goto fail;
 1243 
 1244  for (k = 0; k < 6; k++)
 1245  if (isnan(s->cscheme_v[k]) || s->cscheme_v[k] < 0.0f || s->cscheme_v[k] > 1.0f)
 1246  goto fail;
 1247 
 1248  return 0;
 1249 
 1250 fail:
 1251  av_log(s->ctx, AV_LOG_ERROR, "invalid cscheme.\n");
 1252  return AVERROR(EINVAL);
 1253 }
1254 
1255 /* main filter control */
/* NOTE(review): the signature line was lost in extraction; per the symbol
 * index this is `static av_cold int init(AVFilterContext *ctx)`.
 * Validates options, resolves the deprecated fullhd mode, derives any of
 * bar_h/axis_h/sono_h left unset (< 0) so that the three sections exactly
 * tile the frame height with even values, and picks an oversampling factor
 * fcount. Returns 0 or AVERROR(EINVAL). */
 1257 {
 1258  ShowCQTContext *s = ctx->priv;
 1259  s->ctx = ctx;
 1260 
 /* deprecated fullhd=0 means "half of 1920x1080"; reject if the user also
  * set a custom size */
 1261  if (!s->fullhd) {
 1262  av_log(ctx, AV_LOG_WARNING, "fullhd option is deprecated, use size/s option instead.\n");
 1263  if (s->width != 1920 || s->height != 1080) {
 1264  av_log(ctx, AV_LOG_ERROR, "fullhd set to 0 but with custom dimension.\n");
 1265  return AVERROR(EINVAL);
 1266  }
 1267  s->width /= 2;
 1268  s->height /= 2;
 1269  s->fullhd = 1;
 1270  }
 1271 
 /* auto axis height: ~width/60 rounded up to even, then clamped/forced so
  * the explicit sections still fit */
 1272  if (s->axis_h < 0) {
 1273  s->axis_h = s->width / 60;
 1274  if (s->axis_h & 1)
 1275  s->axis_h++;
 1276  if (s->bar_h >= 0 && s->sono_h >= 0)
 1277  s->axis_h = s->height - s->bar_h - s->sono_h;
 1278  if (s->bar_h >= 0 && s->sono_h < 0)
 1279  s->axis_h = FFMIN(s->axis_h, s->height - s->bar_h);
 1280  if (s->bar_h < 0 && s->sono_h >= 0)
 1281  s->axis_h = FFMIN(s->axis_h, s->height - s->sono_h);
 1282  }
 1283 
 /* auto bar height: half the remaining space, rounded down to even */
 1284  if (s->bar_h < 0) {
 1285  s->bar_h = (s->height - s->axis_h) / 2;
 1286  if (s->bar_h & 1)
 1287  s->bar_h--;
 1288  if (s->sono_h >= 0)
 1289  s->bar_h = s->height - s->sono_h - s->axis_h;
 1290  }
 1291 
 /* sonogram takes whatever is left */
 1292  if (s->sono_h < 0)
 1293  s->sono_h = s->height - s->axis_h - s->bar_h;
 1294 
 /* all dimensions must be even, non-negative, and tile the height exactly */
 1295  if ((s->width & 1) || (s->height & 1) || (s->bar_h & 1) || (s->axis_h & 1) || (s->sono_h & 1) ||
 1296  (s->bar_h < 0) || (s->axis_h < 0) || (s->sono_h < 0) || (s->bar_h > s->height) ||
 1297  (s->axis_h > s->height) || (s->sono_h > s->height) || (s->bar_h + s->axis_h + s->sono_h != s->height)) {
 1298  av_log(ctx, AV_LOG_ERROR, "invalid dimension.\n");
 1299  return AVERROR(EINVAL);
 1300  }
 1301 
 /* auto oversampling: smallest fcount (<= 10) with fcount*width >= 1920 */
 1302  if (!s->fcount) {
 1303  do {
 1304  s->fcount++;
 1305  } while(s->fcount * s->width < 1920 && s->fcount < 10);
 1306  }
 1307 
 /* NOTE(review): the call on the next original line (1308) was dropped by
  * extraction — per the symbol index it is init_colormatrix(s); */
 1309 
 1310  return init_cscheme(s);
 1311 }
1312 
/* NOTE(review): signature line lost in extraction; per the symbol index this
 * is `static av_cold void uninit(AVFilterContext *ctx)`. Releases everything
 * allocated for the filter via the shared cleanup helper. */
 1314 {
 1315  common_uninit(ctx->priv);
 1316 }
1317 
 1319  AVFilterFormatsConfig **cfg_in,
 1320  AVFilterFormatsConfig **cfg_out)
 1321 {
 /* NOTE(review): this function is badly garbled by extraction — the first
  * signature line, the sample-format/channel-layout/pixel-format list
  * contents and the `formats = ...` assignments (original lines 1322-1342)
  * were dropped. What remains shows the standard negotiation shape:
  * reference audio formats on the input config, then pixel formats on the
  * output config, propagating any ff_formats_ref() error. Verify against
  * upstream before editing. */
 1324  static const enum AVPixelFormat pix_fmts[] = {
 1327  };
 1330  int ret;
 1331 
 1332  /* set input audio formats */
 1334  if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0)
 1335  return ret;
 1336 
 1338  if (ret < 0)
 1339  return ret;
 1340 
 1341  /* set output video format */
 1343  if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
 1344  return ret;
 1345 
 1346  return 0;
 1347 }
1348 
1349 static int config_output(AVFilterLink *outlink)
1350 {
1351  FilterLink *l = ff_filter_link(outlink);
1352  AVFilterContext *ctx = outlink->src;
1353  AVFilterLink *inlink = ctx->inputs[0];
1354  ShowCQTContext *s = ctx->priv;
1355  float scale = 1.f;
1356  int ret;
1357 
1358  common_uninit(s);
1359 
1360  outlink->w = s->width;
1361  outlink->h = s->height;
1362  s->format = outlink->format;
1363  outlink->sample_aspect_ratio = av_make_q(1, 1);
1364  l->frame_rate = s->rate;
1365  outlink->time_base = av_inv_q(s->rate);
1366  av_log(ctx, AV_LOG_VERBOSE, "video: %dx%d %s %d/%d fps, bar_h = %d, axis_h = %d, sono_h = %d.\n",
1367  s->width, s->height, av_get_pix_fmt_name(s->format), s->rate.num, s->rate.den,
1368  s->bar_h, s->axis_h, s->sono_h);
1369 
1370  s->cqt_len = s->width * s->fcount;
1371  if (!(s->freq = create_freq_table(s->basefreq, s->endfreq, s->cqt_len)))
1372  return AVERROR(ENOMEM);
1373 
1374  if ((ret = init_volume(s)) < 0)
1375  return ret;
1376 
1377  s->fft_bits = FFMAX(ceil(log2(inlink->sample_rate * s->timeclamp)), 4);
1378  s->fft_len = 1 << s->fft_bits;
1379  av_log(ctx, AV_LOG_VERBOSE, "fft_len = %d, cqt_len = %d.\n", s->fft_len, s->cqt_len);
1380 
1381  ret = av_tx_init(&s->fft_ctx, &s->tx_fn, AV_TX_FLOAT_FFT, 0, s->fft_len, &scale, 0);
1382  s->fft_data = av_calloc(s->fft_len, sizeof(*s->fft_data));
1383  s->fft_input = av_calloc(FFALIGN(s->fft_len + 64, 256), sizeof(*s->fft_input));
1384  s->fft_result = av_calloc(FFALIGN(s->fft_len + 64, 256), sizeof(*s->fft_result));
1385  s->cqt_result = av_malloc_array(s->cqt_len, sizeof(*s->cqt_result));
1386  if (!s->fft_ctx || !s->fft_data || !s->fft_result || !s->cqt_result)
1387  return AVERROR(ENOMEM);
1388 
1389  s->remaining_fill_max = s->fft_len / 2;
1390  if (s->attack > 0.0) {
1391  int k;
1392 
1393  s->remaining_fill_max = FFMIN(s->remaining_fill_max, ceil(inlink->sample_rate * s->attack));
1394  s->attack_data = av_malloc_array(s->remaining_fill_max, sizeof(*s->attack_data));
1395  if (!s->attack_data)
1396  return AVERROR(ENOMEM);
1397 
1398  for (k = 0; k < s->remaining_fill_max; k++) {
1399  double y = M_PI * k / (inlink->sample_rate * s->attack);
1400  s->attack_data[k] = 0.355768 + 0.487396 * cos(y) + 0.144232 * cos(2*y) + 0.012604 * cos(3*y);
1401  }
1402  }
1403 
1404  s->cqt_align = 1;
1405  s->cqt_calc = cqt_calc;
1406  s->permute_coeffs = NULL;
1407  s->draw_sono = draw_sono;
1408  if (s->format == AV_PIX_FMT_RGB24) {
1409  s->draw_bar = draw_bar_rgb;
1410  s->draw_axis = draw_axis_rgb;
1411  s->update_sono = update_sono_rgb;
1412  } else {
1413  s->draw_bar = draw_bar_yuv;
1414  s->draw_axis = draw_axis_yuv;
1415  s->update_sono = update_sono_yuv;
1416  }
1417 
1418 #if ARCH_X86 && HAVE_X86ASM
1420 #endif
1421 
1422  if ((ret = init_cqt(s)) < 0)
1423  return ret;
1424 
1425  if (s->axis_h) {
1426  if (!s->axis) {
1427  if ((ret = init_axis_empty(s)) < 0)
1428  return ret;
1429  } else if (s->axisfile) {
1430  if (init_axis_from_file(s) < 0) {
1431  av_log(ctx, AV_LOG_WARNING, "loading axis image failed, fallback to font rendering.\n");
1432  if (init_axis_from_font(s) < 0) {
1433  av_log(ctx, AV_LOG_WARNING, "loading axis font failed, disable text drawing.\n");
1434  if ((ret = init_axis_empty(s)) < 0)
1435  return ret;
1436  }
1437  }
1438  } else {
1439  if (init_axis_from_font(s) < 0) {
1440  av_log(ctx, AV_LOG_WARNING, "loading axis font failed, disable text drawing.\n");
1441  if ((ret = init_axis_empty(s)) < 0)
1442  return ret;
1443  }
1444  }
1445  }
1446 
1447  if (s->sono_h) {
1448  s->sono_frame = alloc_frame_empty((outlink->format == AV_PIX_FMT_YUV420P) ?
1449  AV_PIX_FMT_YUV422P : outlink->format, s->width, s->sono_h);
1450  if (!s->sono_frame)
1451  return AVERROR(ENOMEM);
1452  }
1453 
1454  s->h_buf = av_malloc_array(s->cqt_len, sizeof (*s->h_buf));
1455  s->rcp_h_buf = av_malloc_array(s->width, sizeof(*s->rcp_h_buf));
1456  s->c_buf = av_malloc_array(s->width, sizeof(*s->c_buf));
1457  if (!s->h_buf || !s->rcp_h_buf || !s->c_buf)
1458  return AVERROR(ENOMEM);
1459 
1460  s->sono_count = 0;
1461  s->next_pts = 0;
1462  s->sono_idx = 0;
1463  s->remaining_fill = s->remaining_fill_max;
1464  s->remaining_frac = 0;
1465  s->step_frac = av_div_q(av_make_q(inlink->sample_rate, s->count) , s->rate);
1466  s->step = (int)(s->step_frac.num / s->step_frac.den);
1467  s->step_frac.num %= s->step_frac.den;
1468  if (s->step_frac.num) {
1469  av_log(ctx, AV_LOG_VERBOSE, "audio: %d Hz, step = %d + %d/%d.\n",
1470  inlink->sample_rate, s->step, s->step_frac.num, s->step_frac.den);
1471  } else {
1472  av_log(ctx, AV_LOG_VERBOSE, "audio: %d Hz, step = %d.\n",
1473  inlink->sample_rate, s->step);
1474  }
1475 
1476  return 0;
1477 }
1478 
1479 
/* Consume one audio frame (or flush on insamples == NULL) into the sliding
 * FFT window and emit video frames as the window fills. fft_data is a sliding
 * buffer of interleaved stereo samples stored as complex (L in .re, R in
 * .im); new samples land in its second half and the buffer is shifted left
 * by `step` samples after each analysis. Takes ownership of insamples. */
 1480 static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
 1481 {
 1482  AVFilterContext *ctx = inlink->dst;
 1483  AVFilterLink *outlink = ctx->outputs[0];
 1484  ShowCQTContext *s = ctx->priv;
 1485  int remaining, step, ret, x, i, j, m, got_frame = 0;
 1486  float *audio_data;
 1487  AVFrame *out = NULL;
 1488 
 /* flush path: pad the tail of the window with silence and keep plotting
  * until the whole buffered signal has been displayed */
 1489  if (!insamples) {
 1490  while (s->remaining_fill < s->remaining_fill_max) {
 1491  memset(&s->fft_data[s->fft_len/2 + s->remaining_fill_max - s->remaining_fill], 0, sizeof(*s->fft_data) * s->remaining_fill);
 1492  ret = plot_cqt(ctx, &out);
 1493  if (ret < 0)
 1494  return ret;
 1495 
 /* advance by step plus the accumulated fractional remainder */
 1496  step = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
 1497  s->remaining_frac = (s->step_frac.num + s->remaining_frac) % s->step_frac.den;
 1498  for (x = 0; x < (s->fft_len/2 + s->remaining_fill_max - step); x++)
 1499  s->fft_data[x] = s->fft_data[x+step];
 1500  s->remaining_fill += step;
 1501  s->next_pts++;
 1502 
 /* at most one frame per activate() call during flush */
 1503  if (out) {
 1504  out->pts = s->next_pts;
 1505  out->duration = 1;
 1506  return ff_filter_frame(outlink, out);
 1507  }
 1508  }
 1509  return 0;
 1510  }
 1511 
 1512  remaining = insamples->nb_samples;
 /* packed stereo float: audio_data[2*n] = left, audio_data[2*n+1] = right */
 1513  audio_data = (float*) insamples->data[0];
 1514 
 1515  while (remaining) {
 1516  i = insamples->nb_samples - remaining;
 1517  j = s->fft_len/2 + s->remaining_fill_max - s->remaining_fill;
 1518  if (remaining >= s->remaining_fill) {
 /* enough input to complete the window: fill it and analyze */
 1519  for (m = FFMAX(0, -j); m < s->remaining_fill; m++) {
 1520  s->fft_data[j+m].re = audio_data[2*(i+m)];
 1521  s->fft_data[j+m].im = audio_data[2*(i+m)+1];
 1522  }
 1523  ret = plot_cqt(ctx, &out);
 1524  if (ret < 0) {
 1525  av_frame_free(&insamples);
 1526  return ret;
 1527  }
 1528  remaining -= s->remaining_fill;
 1529  if (out) {
 /* frame pts: input pts plus the offset of the window center within
  * this input frame, rescaled to the output time base */
 1530  int64_t pts = av_rescale_q(insamples->nb_samples - remaining - s->remaining_fill_max,
 1531  av_make_q(1, inlink->sample_rate), inlink->time_base);
 1532  out->pts = av_rescale_q(insamples->pts + pts, inlink->time_base, outlink->time_base);
 1533  out->duration = 1;
 1534  got_frame = 1;
 1535  ret = ff_filter_frame(outlink, out);
 1536  if (ret < 0) {
 1537  av_frame_free(&insamples);
 1538  return ret;
 1539  }
 1540  out = NULL;
 1541  }
 /* slide the window left by step (integer part + carried fraction) */
 1542  step = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
 1543  s->remaining_frac = (s->step_frac.num + s->remaining_frac) % s->step_frac.den;
 1544  for (m = 0; m < s->fft_len/2 + s->remaining_fill_max - step; m++)
 1545  s->fft_data[m] = s->fft_data[m+step];
 1546  s->remaining_fill = step;
 1547  } else {
 /* not enough input yet: stash what we have and wait for more */
 1548  for (m = FFMAX(0, -j); m < remaining; m++) {
 1549  s->fft_data[j+m].re = audio_data[2*(i+m)];
 1550  s->fft_data[j+m].im = audio_data[2*(i+m)+1];
 1551  }
 1552  s->remaining_fill -= remaining;
 1553  remaining = 0;
 1554  }
 1555  }
 /* if nothing was emitted, ask to be scheduled again promptly */
 1556  if (!got_frame)
 1557  ff_filter_set_ready(ctx, 100);
 1558  av_frame_free(&insamples);
 1559  return 0;
 1560 }
1561 
/* NOTE(review): the signature line was lost in extraction — presumably
 * `static int activate(AVFilterContext *ctx)` as referenced by
 * `.activate = activate` below; confirm against upstream.
 * Standard activate callback: consume exactly one analysis step worth of
 * samples, otherwise propagate EOF (triggering the flush in filter_frame)
 * or forward the frame-wanted request upstream. */
 1563 {
 1564  AVFilterLink *inlink = ctx->inputs[0];
 1565  AVFilterLink *outlink = ctx->outputs[0];
 1566  ShowCQTContext *s = ctx->priv;
 1567  int nb_samples, ret, status;
 1568  int64_t pts;
 1569  AVFrame *in;
 1570 
 /* NOTE(review): a statement on original line 1571 was dropped by
  * extraction — most likely FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
  * confirm against upstream */
 1572 
 /* request exactly one step of samples (integer part + carried fraction) */
 1573  nb_samples = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
 1574  ret = ff_inlink_consume_samples(inlink, nb_samples, nb_samples, &in);
 1575  if (ret < 0)
 1576  return ret;
 1577  if (ret > 0)
 1578  return filter_frame(inlink, in);
 1579 
 /* NOTE(review): the enclosing condition on original line 1580 was dropped —
  * presumably if (ff_inlink_acknowledge_status(inlink, &status, &pts)) { —
  * which opens the brace closed at original line 1587 below */
 1581  if (status == AVERROR_EOF) {
 1582  s->next_pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
 /* NOTE(review): original line 1583 (the flush call, presumably
  * ret = filter_frame(inlink, NULL);) was dropped by extraction */
 1584  ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
 1585  return ret;
 1586  }
 1587  }
 1588 
 1589  FF_FILTER_FORWARD_WANTED(outlink, inlink);
 1590 
 1591  return FFERROR_NOT_READY;
 1592 }
1593 
/* Single video output pad; config_output finalizes the layout and buffers
 * once the link format is negotiated. */
 1594 static const AVFilterPad showcqt_outputs[] = {
 1595  {
 1596  .name = "default",
 1597  .type = AVMEDIA_TYPE_VIDEO,
 1598  .config_props = config_output,
 1599  },
 1600 };
1601 
 /* NOTE(review): the opening line of this initializer (original line 1602,
  * presumably `const FFFilter ff_avf_showcqt = {` per the symbol index) and
  * the trailing pad/format macro lines (original lines 1610-1612, likely
  * FILTER_INPUTS/FILTER_OUTPUTS/FILTER_QUERY_FUNC2) were dropped by
  * extraction — confirm against upstream before editing. */
 1603  .p.name = "showcqt",
 1604  .p.description = NULL_IF_CONFIG_SMALL("Convert input audio to a CQT (Constant/Clamped Q Transform) spectrum video output."),
 1605  .p.priv_class = &showcqt_class,
 1606  .init = init,
 1607  .activate = activate,
 1608  .uninit = uninit,
 1609  .priv_size = sizeof(ShowCQTContext),
 1613 };
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:32
formats
formats
Definition: signature.h:47
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:89
rgb::b
uint8_t b
Definition: rpzaenc.c:63
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_CHANNEL_LAYOUT_STEREO_DOWNMIX
#define AV_CHANNEL_LAYOUT_STEREO_DOWNMIX
Definition: channel_layout.h:432
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
level
uint8_t level
Definition: svq3.c:208
draw_axis_yuv
static void draw_axis_yuv(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off)
Definition: avf_showcqt.c:965
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
draw_bar_yuv
static void draw_bar_yuv(AVFrame *out, const float *h, const float *rcp_h, const ColorFloat *c, int bar_h, float bar_t)
Definition: avf_showcqt.c:812
out
static FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
AV_CHANNEL_LAYOUT_STEREO
#define AV_CHANNEL_LAYOUT_STEREO
Definition: channel_layout.h:395
color
Definition: vf_paletteuse.c:513
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1067
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:931
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
FFERROR_NOT_READY
return FFERROR_NOT_READY
Definition: filter_design.txt:204
ColorFloat
Definition: avf_showcqt.h:40
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
Underlying C type is AVRational.
Definition: opt.h:315
av_div_q
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
init_cscheme
static int init_cscheme(ShowCQTContext *s)
Definition: avf_showcqt.c:1234
int64_t
long long int64_t
Definition: coverity.c:34
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
normalize.log
log
Definition: normalize.py:21
mask
int mask
Definition: mediacodecdec_common.c:154
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:434
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:536
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
AVFrame::width
int width
Definition: frame.h:506
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:68
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:43
ff_make_pixel_format_list
av_warn_unused_result AVFilterFormats * ff_make_pixel_format_list(const enum AVPixelFormat *fmts)
Create a list of supported pixel formats.
filters.h
data
const char data[16]
Definition: mxf.c:149
expf
#define expf(x)
Definition: libm.h:285
FLAGS
#define FLAGS
Definition: avf_showcqt.c:61
half
static uint8_t half(int a, int b)
Definition: mobiclip.c:540
ff_set_common_channel_layouts_from_list2
int ff_set_common_channel_layouts_from_list2(const AVFilterContext *ctx, AVFilterFormatsConfig **cfg_in, AVFilterFormatsConfig **cfg_out, const AVChannelLayout *fmts)
Definition: formats.c:1026
BLEND_WITH_CHROMA
#define BLEND_WITH_CHROMA(c)
Definition: avf_showcqt.c:892
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
base
uint8_t base
Definition: vp3data.h:128
c_weighting
static double c_weighting(void *p, double f)
Definition: avf_showcqt.c:224
AVComplexFloat
Definition: tx.h:27
max
#define max(a, b)
Definition: cuda_runtime.h:33
CSCHEME
#define CSCHEME
Definition: avf_showcqt.c:58
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_avf_showcqt
const FFFilter ff_avf_showcqt
Definition: avf_showcqt.c:1602
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:220
render_default_font
static int render_default_font(AVFrame *tmp)
Definition: avf_showcqt.c:654
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
video.h
draw_axis_rgb
static void draw_axis_rgb(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off)
Definition: avf_showcqt.c:863
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:903
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:455
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:735
avpriv_vga16_font_get
const uint8_t * avpriv_vga16_font_get(void)
Definition: xga_font_data.c:430
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
rgb
Definition: rpzaenc.c:60
AVComplexFloat::im
float im
Definition: tx.h:28
a_weighting
static double a_weighting(void *p, double f)
Definition: avf_showcqt.c:209
fail
#define fail()
Definition: checkasm.h:224
query_formats
static int query_formats(const AVFilterContext *ctx, AVFilterFormatsConfig **cfg_in, AVFilterFormatsConfig **cfg_out)
Definition: avf_showcqt.c:1318
val
static double val(void *priv, double ch)
Definition: aeval.c:77
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: avf_showcqt.c:1313
pts
static int64_t pts
Definition: transcode_aac.c:644
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:368
FILTER_QUERY_FUNC2
#define FILTER_QUERY_FUNC2(func)
Definition: filters.h:241
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:40
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
ceil
static __device__ float ceil(float a)
Definition: cuda_runtime.h:176
lrint
#define lrint
Definition: tablegen.h:53
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:119
FFFilter
Definition: filters.h:267
BLEND_WITHOUT_CHROMA
#define BLEND_WITHOUT_CHROMA(c, alpha_inc)
Definition: avf_showcqt.c:912
s
#define s(width, name)
Definition: cbs_vp9.c:198
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: filters.h:265
UPDATE_TIME
#define UPDATE_TIME(t)
DRAW_BAR_WITHOUT_CHROMA
#define DRAW_BAR_WITHOUT_CHROMA(x)
Definition: avf_showcqt.c:801
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
init_colormatrix
static void init_colormatrix(ShowCQTContext *s)
Definition: avf_showcqt.c:1198
var_names
static const char *const var_names[]
Definition: noise.c:30
floor
static __device__ float floor(float a)
Definition: cuda_runtime.h:173
g
const char * g
Definition: vf_curves.c:128
ff_filter_link
static FilterLink * ff_filter_link(AVFilterLink *link)
Definition: filters.h:199
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:707
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Underlying C type is double.
Definition: opt.h:267
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:756
ff_outlink_set_status
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: filters.h:629
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_TX_FLOAT_FFT
@ AV_TX_FLOAT_FFT
Standard complex to complex FFT with sample data type of AVComplexFloat, AVComplexDouble or AVComplex...
Definition: tx.h:47
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
ctx
static AVFormatContext * ctx
Definition: movenc.c:49
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:824
BLEND_CHROMA2x2
#define BLEND_CHROMA2x2(c)
Definition: avf_showcqt.c:943
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVExpr
Definition: eval.c:171
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
ShowCQTContext
Definition: avf_showcqt.h:45
av_fallthrough
#define av_fallthrough
Definition: attributes.h:67
tmp
static uint8_t tmp[40]
Definition: aes_ctr.c:52
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:100
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_load_image
int ff_load_image(struct AVFrame **outframe, const char *filename, void *log_ctx)
Load image from filename and put the resulting image in an AVFrame.
Definition: lavfutils.c:32
clip_with_log
static double clip_with_log(void *log_ctx, const char *name, double val, double min, double max, double nan_replace, int idx)
Definition: avf_showcqt.c:188
xs
#define xs(width, name, var, subs,...)
Definition: cbs_vp9.c:305
result
and forward the result(frame or status change) to the corresponding input. If nothing is possible
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1535
NULL
#define NULL
Definition: coverity.c:32
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
init
static av_cold int init(AVFilterContext *ctx)
Definition: avf_showcqt.c:1256
vars
static const uint8_t vars[2][12]
Definition: camellia.c:183
render_freetype
static int render_freetype(ShowCQTContext *s, AVFrame *tmp, char *fontfile)
Definition: avf_showcqt.c:506
isnan
#define isnan(x)
Definition: libm.h:342
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
Underlying C type is two consecutive integers.
Definition: opt.h:303
ff_audio_default_filterpad
const AVFilterPad ff_audio_default_filterpad[1]
An AVFilterPad array whose only entry has name "default" and is of type AVMEDIA_TYPE_AUDIO.
Definition: audio.c:34
sqrtf
static __device__ float sqrtf(float a)
Definition: cuda_runtime.h:184
double
double
Definition: af_crystalizer.c:132
time.h
rgb_from_cqt
static void rgb_from_cqt(ColorFloat *c, const AVComplexFloat *v, float g, int len, float cscheme[6])
Definition: avf_showcqt.c:735
exp
int8_t exp
Definition: eval.c:76
ff_inlink_acknowledge_status
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1462
alloc_frame_empty
static AVFrame * alloc_frame_empty(enum AVPixelFormat format, int w, int h)
Definition: avf_showcqt.c:362
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AVFilterFormatsConfig
Lists of formats / etc.
Definition: avfilter.h:121
inc
static int inc(int num, int period)
Definition: perlin.c:34
init_axis_from_file
static int init_axis_from_file(ShowCQTContext *s)
Definition: avf_showcqt.c:406
eval.h
f
f
Definition: af_crystalizer.c:122
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
showcqt_outputs
static const AVFilterPad showcqt_outputs[]
Definition: avf_showcqt.c:1594
height
#define height
Definition: dsp.h:89
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:319
process_cqt
static void process_cqt(ShowCQTContext *s)
Definition: avf_showcqt.c:1086
calculate_gamma
static float calculate_gamma(float v, float g)
Definition: avf_showcqt.c:722
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
init_axis_color
static int init_axis_color(ShowCQTContext *s, AVFrame *tmp, int half)
Definition: avf_showcqt.c:459
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
init_axis_from_font
static int init_axis_from_font(ShowCQTContext *s)
Definition: avf_showcqt.c:681
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
AVComplexFloat::re
float re
Definition: tx.h:28
update_sono_rgb
static void update_sono_rgb(AVFrame *sono, const ColorFloat *c, int idx)
Definition: avf_showcqt.c:1055
Coeffs::val
float * val
Definition: avf_showcqt.h:28
TLENGTH
#define TLENGTH
Definition: avf_showcqt.c:52
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:521
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
rgb::g
uint8_t g
Definition: rpzaenc.c:62
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
init_volume
static int init_volume(ShowCQTContext *s)
Definition: avf_showcqt.c:231
FF_FILTER_FORWARD_WANTED
FF_FILTER_FORWARD_WANTED(outlink, inlink)
attributes.h
xga_font_data.h
ENDFREQ
#define ENDFREQ
Definition: avf_showcqt.c:51
M_PI
#define M_PI
Definition: mathematics.h:67
yuv_from_cqt
static void yuv_from_cqt(ColorFloat *c, const AVComplexFloat *v, float gamma, int len, float cm[3][3], float cscheme[6])
Definition: avf_showcqt.c:745
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:295
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Underlying C type is float.
Definition: opt.h:271
BLEND_CHROMA2
#define BLEND_CHROMA2(c)
Definition: avf_showcqt.c:926
Coeffs::len
int len
Definition: avf_showcqt.h:29
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:708
convert_axis_pixel_format
static enum AVPixelFormat convert_axis_pixel_format(enum AVPixelFormat format)
Definition: avf_showcqt.c:388
create_freq_table
static double * create_freq_table(double base, double end, int n)
Definition: avf_showcqt.c:168
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:514
b_weighting
static double b_weighting(void *p, double f)
Definition: avf_showcqt.c:217
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
b_func
static double b_func(void *p, double x)
Definition: avf_showcqt.c:453
r_func
static double r_func(void *p, double x)
Definition: avf_showcqt.c:441
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:711
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
VOLUME_MAX
#define VOLUME_MAX
Definition: avf_showcqt.c:54
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
cbrtf
static av_always_inline float cbrtf(float x)
Definition: libm.h:63
TLENGTH_MIN
#define TLENGTH_MIN
Definition: avf_showcqt.c:53
rgb::r
uint8_t r
Definition: rpzaenc.c:61
OFFSET
#define OFFSET(x)
Definition: avf_showcqt.c:60
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
len
int len
Definition: vorbis_enc_data.h:426
AVFilterPad::name
const char * name
Pad name.
Definition: filters.h:46
draw_bar_rgb
static void draw_bar_rgb(AVFrame *out, const float *h, const float *rcp_h, const ColorFloat *c, int bar_h, float bar_t)
Definition: avf_showcqt.c:759
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:703
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
Coeffs::start
int start
Definition: avf_showcqt.h:29
log2
#define log2(x)
Definition: libm.h:406
cqt_calc
static void cqt_calc(AVComplexFloat *dst, const AVComplexFloat *src, const Coeffs *coeffs, int len, int fft_len)
Definition: avf_showcqt.c:273
activate
static int activate(AVFilterContext *ctx)
Definition: avf_showcqt.c:1562
common_uninit
static void common_uninit(ShowCQTContext *s)
Definition: avf_showcqt.c:111
ff_make_sample_format_list
av_warn_unused_result AVFilterFormats * ff_make_sample_format_list(const enum AVSampleFormat *fmts)
Create a list of supported sample formats.
plot_cqt
static int plot_cqt(AVFilterContext *ctx, AVFrame **frameout)
Definition: avf_showcqt.c:1132
ret
ret
Definition: filter_design.txt:187
config_output
static int config_output(AVFilterLink *outlink)
Definition: avf_showcqt.c:1349
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: filters.h:264
FONTCOLOR
#define FONTCOLOR
Definition: avf_showcqt.c:55
showcqt_options
static const AVOption showcqt_options[]
Definition: avf_showcqt.c:63
AVFrame::height
int height
Definition: frame.h:506
Coeffs
Definition: af_atilt.c:28
status
ov_status_e status
Definition: dnn_backend_openvino.c:100
channel_layout.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AVCOL_SPC_FCC
@ AVCOL_SPC_FCC
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:705
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
avfilter.h
midi
static double midi(void *p, double f)
Definition: avf_showcqt.c:436
avf_showcqt.h
cm
#define cm
Definition: dvbsubdec.c:40
init_cqt
static int init_cqt(ShowCQTContext *s)
Definition: avf_showcqt.c:300
update_sono_yuv
static void update_sono_yuv(AVFrame *sono, const ColorFloat *c, int idx)
Definition: avf_showcqt.c:1067
ff_showcqt_init_x86
void ff_showcqt_init_x86(ShowCQTContext *s)
Definition: avf_showcqt_init.c:47
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
lavfutils.h
g_func
static double g_func(void *p, double x)
Definition: avf_showcqt.c:447
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AVFilterContext
An instance of a filter.
Definition: avfilter.h:274
FF_FILTER_FORWARD_STATUS_BACK
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Definition: filters.h:639
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
Definition: avf_showcqt.c:1480
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
FFFilter::p
AVFilter p
The public AVFilter.
Definition: filters.h:271
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
audio.h
BASEFREQ
#define BASEFREQ
Definition: avf_showcqt.c:50
w
uint8_t w
Definition: llvidencdsp.c:39
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:278
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
channel_layouts
static const uint16_t channel_layouts[7]
Definition: dca_lbr.c:112
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:479
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
h
h
Definition: vp9dsp_template.c:2070
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(showcqt)
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Underlying C type is a uint8_t* that is either NULL or points to a C string allocated with the av_mal...
Definition: opt.h:276
width
#define width
Definition: dsp.h:89
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:702
ff_scale_image
int ff_scale_image(uint8_t *dst_data[4], int dst_linesize[4], int dst_w, int dst_h, enum AVPixelFormat dst_pix_fmt, uint8_t *const src_data[4], int src_linesize[4], int src_w, int src_h, enum AVPixelFormat src_pix_fmt, void *log_ctx)
Scale image using libswscale.
Definition: lswsutils.c:22
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
lswsutils.h
AV_SAMPLE_FMT_FLT
@ AV_SAMPLE_FMT_FLT
float
Definition: samplefmt.h:60
draw_sono
static void draw_sono(AVFrame *out, AVFrame *sono, int off, int idx)
Definition: avf_showcqt.c:1030
src
#define src
Definition: vp8dsp.c:248
av_clipd
av_clipd
Definition: af_crystalizer.c:132
DRAW_BAR_WITH_CHROMA
#define DRAW_BAR_WITH_CHROMA(x)
Definition: avf_showcqt.c:786
init_axis_empty
static int init_axis_empty(ShowCQTContext *s)
Definition: avf_showcqt.c:399
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
ff_filter_set_ready
void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
Mark a filter ready and schedule it for activation.
Definition: avfilter.c:229
tx.h
funcs
CheckasmFunc * funcs
Definition: checkasm.c:467
min
float min
Definition: vorbis_enc_data.h:429
render_fontconfig
static int render_fontconfig(ShowCQTContext *s, AVFrame *tmp, char *font)
Definition: avf_showcqt.c:590