Go to the documentation of this file.
155 #define YAE_ATEMPO_MIN 0.5
156 #define YAE_ATEMPO_MAX 100.0
158 #define OFFSET(x) offsetof(ATempoContext, x)
161 {
"tempo",
"set tempo scale factor",
178 return &atempo->
frag[(atempo->
nfrag + 1) % 2];
247 #define RE_MALLOC_OR_FAIL(field, field_size) \
250 field = av_malloc(field_size); \
252 yae_release_buffers(atempo); \
253 return AVERROR(ENOMEM); \
267 uint32_t nlevels = 0;
283 if (pot < atempo->
window) {
322 double t = (double)
i / (
double)(atempo->
window - 1);
323 double h = 0.5 * (1.0 - cos(2.0 *
M_PI * t));
324 atempo->
hann[
i] = (float)
h;
346 #define yae_init_xdat(scalar_type, scalar_max) \
348 const uint8_t *src_end = src + \
349 frag->nsamples * atempo->channels * sizeof(scalar_type); \
351 FFTSample *xdat = frag->xdat; \
354 if (atempo->channels == 1) { \
355 for (; src < src_end; xdat++) { \
356 tmp = *(const scalar_type *)src; \
357 src += sizeof(scalar_type); \
359 *xdat = (FFTSample)tmp; \
362 FFTSample s, max, ti, si; \
365 for (; src < src_end; xdat++) { \
366 tmp = *(const scalar_type *)src; \
367 src += sizeof(scalar_type); \
369 max = (FFTSample)tmp; \
370 s = FFMIN((FFTSample)scalar_max, \
371 (FFTSample)fabsf(max)); \
373 for (i = 1; i < atempo->channels; i++) { \
374 tmp = *(const scalar_type *)src; \
375 src += sizeof(scalar_type); \
377 ti = (FFTSample)tmp; \
378 si = FFMIN((FFTSample)scalar_max, \
379 (FFTSample)fabsf(ti)); \
431 const int read_size = stop_here - atempo->
position[0];
433 if (stop_here <= atempo->position[0]) {
440 while (atempo->
position[0] < stop_here &&
src < src_end) {
441 int src_samples = (src_end -
src) / atempo->
stride;
444 int nsamples =
FFMIN(read_size, src_samples);
448 nsamples =
FFMIN(nsamples, atempo->
ring);
506 int64_t missing, start, zeros;
509 int i0, i1, n0, n1, na, nb;
512 if (src_ref &&
yae_load_data(atempo, src_ref, src_end, stop_here) != 0) {
519 stop_here - atempo->
position[0] : 0;
522 missing < (int64_t)atempo->
window ?
523 (uint32_t)(atempo->
window - missing) : 0;
537 memset(dst, 0, zeros * atempo->
stride);
538 dst += zeros * atempo->
stride;
541 if (zeros == nsamples) {
558 i0 = frag->
position[0] + zeros - start;
559 i1 = i0 < na ? 0 : i0 - na;
561 n0 = i0 < na ?
FFMIN(na - i0, (
int)(nsamples - zeros)) : 0;
562 n1 = nsamples - zeros - n0;
566 dst += n0 * atempo->
stride;
581 const double fragment_step = atempo->
tempo * (double)(atempo->
window / 2);
620 for (
i = 1;
i <
window;
i++, xa++, xb++, xc++) {
621 xc->
re = (xa->re * xb->re + xa->im * xb->im);
622 xc->
im = (xa->im * xb->re - xa->re * xb->im);
643 int best_offset = -drift;
667 for (
i = i0;
i < i1;
i++, xcorr++) {
674 if (metric > best_metric) {
675 best_metric = metric;
694 const double prev_output_position =
698 const double ideal_output_position =
701 const int drift = (
int)(prev_output_position - ideal_output_position);
703 const int delta_max = atempo->
window / 2;
727 #define yae_blend(scalar_type) \
729 const scalar_type *aaa = (const scalar_type *)a; \
730 const scalar_type *bbb = (const scalar_type *)b; \
732 scalar_type *out = (scalar_type *)dst; \
733 scalar_type *out_end = (scalar_type *)dst_end; \
736 for (i = 0; i < overlap && out < out_end; \
737 i++, atempo->position[1]++, wa++, wb++) { \
742 for (j = 0; j < atempo->channels; \
743 j++, aaa++, bbb++, out++) { \
744 float t0 = (float)*aaa; \
745 float t1 = (float)*bbb; \
748 frag->position[0] + i < 0 ? \
750 (scalar_type)(t0 * w0 + t1 * w1); \
753 dst = (uint8_t *)out; \
778 const int64_t overlap = stop_here - start_here;
780 const int64_t ia = start_here - prev->
position[1];
781 const int64_t
ib = start_here - frag->
position[1];
783 const float *wa = atempo->
hann + ia;
784 const float *wb = atempo->
hann +
ib;
793 overlap <= frag->nsamples);
839 if (!atempo->
nfrag) {
912 if (!atempo->
nfrag) {
946 while (atempo->
position[1] < overlap_end) {
962 av_assert0(start_here <= stop_here && frag->position[1] <= start_here);
967 src_size = (
int)(stop_here - start_here) * atempo->
stride;
968 dst_size = dst_end - dst;
969 nbytes =
FFMIN(src_size, dst_size);
971 memcpy(dst,
src, nbytes);
1085 int n_out = (
int)(0.5 + ((
double)n_in) / atempo->
tempo);
1095 while (
src < src_end) {
1135 int n_max = atempo->
ring;
1139 while (err ==
AVERROR(EAGAIN)) {
1213 .priv_class = &atempo_class,
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
static int yae_update(AVFilterContext *ctx)
A list of supported channel layouts.
Filter format negotiation: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output, the filter lists the supported formats. For video that means pixel format; for audio that means channel layout, sample format, and sample rate. The lists are not just lists — they are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static int push_samples(ATempoContext *atempo, AVFilterLink *outlink, int n_out)
static int config_props(AVFilterLink *inlink)
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
static enum AVSampleFormat sample_fmts[]
enum MovChannelLayoutTag * layouts
#define AVERROR_EOF
End of file.
The exact code depends on how similar the blocks are and how related they are to the filter, and it needs to apply these operations to the correct inlink or outlink if there are several. Macros are available to factor that out when no extra processing is needed on the inlink.
static void yae_downmix(ATempoContext *atempo, AudioFragment *frag)
Initialize complex data buffer of a given audio fragment with down-mixed mono data of appropriate scale.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static int yae_load_data(ATempoContext *atempo, const uint8_t **src_ref, const uint8_t *src_end, int64_t stop_here)
Populate the internal data buffer on as-needed basis.
This structure describes decoded (raw) audio or video data.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
#define AV_OPT_FLAG_RUNTIME_PARAM
a generic parameter which can be set by the user at runtime
const char * name
Filter name.
A link between two filters.
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
static av_cold int init(AVFilterContext *ctx)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static SDL_Window * window
static int query_formats(AVFilterContext *ctx)
static const AVFilterPad atempo_outputs[]
A filter pad used for either input or output.
static void yae_apply(ATempoContext *atempo, const uint8_t **src_ref, const uint8_t *src_end, uint8_t **dst_ref, uint8_t *dst_end)
Feed as much data to the filter as it is able to consume and receive as much processed data in the destination buffer as it is able to produce or hold.
Filter format negotiation: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output, the filter lists the supported formats. For video that means pixel format; for audio that means channel layout, sample format (the sample packing is implied by the sample format) and sample rate. The lists are not just lists — they are references to shared objects.
#define yae_init_xdat(scalar_type, scalar_max)
A helper macro for initializing complex data buffer with scalar data of a given type.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static const AVFilterPad outputs[]
RDFTContext * real_to_complex
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
void av_rdft_calc(RDFTContext *s, FFTSample *data)
#define AV_OPT_FLAG_AUDIO_PARAM
#define RE_MALLOC_OR_FAIL(field, field_size)
RDFTContext * complex_to_real
static int yae_align(AudioFragment *frag, const AudioFragment *prev, const int window, const int delta_max, const int drift, FFTSample *correlation, RDFTContext *complex_to_real)
Calculate alignment offset for given fragment relative to the previous fragment.
Describe the class of an AVClass context structure.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Rational number (pair of numerator and denominator).
AVFILTER_DEFINE_CLASS(atempo)
static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
static AudioFragment * yae_curr_frag(ATempoContext *atempo)
static int yae_reset(ATempoContext *atempo, enum AVSampleFormat format, int sample_rate, int channels)
Prepare filter for processing audio data of given format, sample rate and number of channels.
These buffered frames must be flushed immediately if a new input produces new output. When a frame arrives, the filter must not call request_frame to get more: it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. This also applies if a filter has several inputs.
static void correlation(int32_t *corr, int32_t *ener, int16_t *buffer, int16_t lag, int16_t blen, int16_t srange, int16_t scale)
static int process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
static int yae_overlap_add(ATempoContext *atempo, uint8_t **dst_ref, uint8_t *dst_end)
Blend the overlap region of previous and current audio fragment and output the results to the given destination buffer.
static int yae_load_frag(ATempoContext *atempo, const uint8_t **src_ref, const uint8_t *src_end)
Populate current audio fragment data buffer.
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static AudioFragment * yae_prev_frag(ATempoContext *atempo)
int sample_rate
Sample rate of the audio data.
#define AV_NOPTS_VALUE
Undefined timestamp value.
AVFilterContext * src
source filter
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
The reader does not expect b to be semantically negative here, and if the code is changed — by maybe adding a cast, a division, or another operation — the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int (to use the same example, SUINT a).
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be. (vf offset)
#define yae_blend(scalar_type)
A helper macro for blending the overlap region of previous and current audio fragment.
int sample_rate
samples per second
int nb_samples
number of audio samples (per channel) described by this frame
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
static void yae_advance_to_next_frag(ATempoContext *atempo)
Prepare for loading next audio fragment.
static void yae_clear(ATempoContext *atempo)
Reset filter to initial state, do not deallocate existing local buffers.
AVSampleFormat
Audio sample formats.
@ AV_SAMPLE_FMT_S16
signed 16 bits
const char * name
Pad name.
static const AVFilterPad atempo_inputs[]
static int yae_flush(ATempoContext *atempo, uint8_t **dst_ref, uint8_t *dst_end)
Flush any buffered data from the filter.
static const AVOption atempo_options[]
static int request_frame(AVFilterLink *outlink)
static void yae_xcorr_via_rdft(FFTSample *xcorr, RDFTContext *complex_to_real, const FFTComplex *xa, const FFTComplex *xb, const int window)
Calculate cross-correlation via rDFT.
enum AVSampleFormat format
static int yae_adjust_position(ATempoContext *atempo)
Adjust current fragment position for better alignment with previous fragment.
FilterState
Filter state machine states.
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
static void yae_release_buffers(ATempoContext *atempo)
Reset filter to initial state and deallocate all buffers.
static av_cold void uninit(AVFilterContext *ctx)
#define flags(name, subs,...)
void av_rdft_end(RDFTContext *s)
@ AV_SAMPLE_FMT_DBL
double
@ AV_SAMPLE_FMT_S32
signed 32 bits
A fragment of audio waveform.