#define OFFSET(x) offsetof(ColorBalanceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
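The option table itself is elided from this listing; OFFSET() and FLAGS feed it. A representative, abridged sketch (the full table defines rs/gs/bs, rm/gm/bm, rh/gh/bh and pl; the exact entries are in the source):

static const AVOption colorbalance_options[] = {
    /* cyan(-1) .. red(+1) shift applied in the shadows */
    { "rs", "set red shadows",    OFFSET(cyan_red.shadows),   AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    /* rebuild pixels at their original lightness after shifting */
    { "pl", "preserve lightness", OFFSET(preserve_lightness), AV_OPT_TYPE_BOOL,  {.i64=0},  0, 1, FLAGS },
    { NULL }
};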
static float get_component(float v, float l,
                           float s, float m, float h)
{
    const float a = 4.f, b = 0.333f, scale = 0.7f;

    s *= av_clipf((b - l) * a + 0.5f, 0, 1) * scale;
    m *= av_clipf((l - b) * a + 0.5f, 0, 1) * av_clipf((1.0 - l - b) * a + 0.5f, 0, 1) * scale;
    h *= av_clipf((l + b - 1) * a + 0.5f, 0, 1) * scale;

    v += s;
    v += m;
    v += h;

    return av_clipf(v, 0, 1);
}
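Each of the three user adjustments is gated by a clamped linear ramp in the pixel's lightness l (the workers below pass l = FFMAX3 + FFMIN3 of the normalized RGB, so it spans 0..2): the shadow weight fades out as l rises past b, the highlight weight fades in for large l, and the midtone weight is the product of two opposing ramps. A standalone sketch that prints the three weights, with clipf standing in for av_clipf:

#include <stdio.h>

/* stand-in for av_clipf() */
static float clipf(float x, float lo, float hi)
{
    return x < lo ? lo : (x > hi ? hi : x);
}

int main(void)
{
    const float a = 4.f, b = 0.333f, scale = 0.7f;

    for (float l = 0.f; l <= 2.f; l += 0.25f) {
        float ws = clipf((b - l) * a + 0.5f, 0, 1) * scale;
        float wm = clipf((l - b) * a + 0.5f, 0, 1) *
                   clipf((1.f - l - b) * a + 0.5f, 0, 1) * scale;
        float wh = clipf((l + b - 1.f) * a + 0.5f, 0, 1) * scale;

        printf("l=%.2f shadows=%.2f midtones=%.2f highlights=%.2f\n",
               l, ws, wm, wh);
    }
    return 0;
}

At l = 0 only the shadow weight is nonzero (0.7), the midtone weight peaks around l = 0.5, and the highlight weight saturates at 0.7 for large l; scale caps how strongly any single knob can move a component.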
 
static float hfun(float n, float h, float s, float l)
{
    float a = s * FFMIN(l, 1. - l);
    float k = fmodf(n + h / 30.f, 12.f);

    return av_clipf(l - a * FFMAX(FFMIN3(k - 3.f, 9.f - k, 1), -1.f), 0, 1);
}
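hfun() is the f(n) helper of the well-known "alternative" HSL-to-RGB formulation: k = (n + h/30) mod 12 and f(n) = l - a * max(min(k - 3, 9 - k, 1), -1), evaluated at n = 0, 8 and 4 for R, G and B (see preservel below). A self-contained check, with clipf/fminf/fmaxf standing in for av_clipf/FFMIN/FFMIN3/FFMAX:

#include <math.h>
#include <stdio.h>

static float clipf(float x, float lo, float hi)
{
    return x < lo ? lo : (x > hi ? hi : x);
}

static float hfun(float n, float h, float s, float l)
{
    float a = s * fminf(l, 1.f - l);
    float k = fmodf(n + h / 30.f, 12.f);

    return clipf(l - a * fmaxf(fminf(fminf(k - 3.f, 9.f - k), 1.f), -1.f), 0, 1);
}

int main(void)
{
    /* hue 120 (green), full saturation, lightness 0.25 -> (0, 0.5, 0) */
    float h = 120.f, s = 1.f, l = 0.25f;

    printf("r=%.3f g=%.3f b=%.3f\n",
           hfun(0, h, s, l), hfun(8, h, s, l), hfun(4, h, s, l));
    return 0;
}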
 
static void preservel(float *r, float *g, float *b, float l)
{
    float max = FFMAX3(*r, *g, *b);
    float min = FFMIN3(*r, *g, *b);
    float h, s;

    l *= 0.5;

    if (*r == *g && *g == *b) {
        h = 0.;
    } else if (max == *r) {
        h = 60. * (0. + (*g - *b) / (max - min));
    } else if (max == *g) {
        h = 60. * (2. + (*b - *r) / (max - min));
    } else if (max == *b) {
        h = 60. * (4. + (*r - *g) / (max - min));
    }

    if (h < 0.)
        h += 360.;

    if (max == 0. || min == 1.) {
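        /* Hedged reconstruction of the remainder (elided from this listing):
         * s is the standard HSL saturation, and the channels are rebuilt from
         * (h, s, l) with hfun(), so hue and saturation may shift while the
         * caller's original lightness survives the balance change. */
        s = 0.;
    } else {
        s = (max - min) / (1. - (FFABS(2. * l - 1.)));
    }

    *r = hfun(0., h, s, l);
    *g = hfun(8., h, s, l);
    *b = hfun(4., h, s, l);
}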
 
static int color_balance8_p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr + 1)) / nb_jobs;
    const uint8_t *srcg = in->data[0] + slice_start * in->linesize[0];
    const uint8_t *srcb = in->data[1] + slice_start * in->linesize[1];
    const uint8_t *srcr = in->data[2] + slice_start * in->linesize[2];
    const uint8_t *srca = in->data[3] + slice_start * in->linesize[3];
    uint8_t *dstg = out->data[0] + slice_start * out->linesize[0];
    uint8_t *dstb = out->data[1] + slice_start * out->linesize[1];
    uint8_t *dstr = out->data[2] + slice_start * out->linesize[2];
    uint8_t *dsta = out->data[3] + slice_start * out->linesize[3];
    const float max = s->max;
    int i, j;

    for (i = slice_start; i < slice_end; i++) {
        for (j = 0; j < out->width; j++) {
            float r = srcr[j] / max;
            float g = srcg[j] / max;
            float b = srcb[j] / max;
            const float l = FFMAX3(r, g, b) + FFMIN3(r, g, b);

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dstr[j] = av_clip_uint8(r * max);
            dstg[j] = av_clip_uint8(g * max);
            dstb[j] = av_clip_uint8(b * max);
            if (in != out && out->linesize[3])
                dsta[j] = srca[j];
        }

        srcg += in->linesize[0];
        srcb += in->linesize[1];
        srcr += in->linesize[2];
        srca += in->linesize[3];
        dstg += out->linesize[0];
        dstb += out->linesize[1];
        dstr += out->linesize[2];
        dsta += out->linesize[3];
    }

    return 0;
}
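Each worker is a slice job: jobnr/nb_jobs divide the rows into [slice_start, slice_end). The dispatching filter_frame() is elided from this listing; a hedged sketch of the standard libavfilter pattern it follows (filter in place when the input frame is writable, otherwise allocate a fresh buffer):

    ThreadData td;
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        out = in;                        /* process in place */
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    td.in = in;
    td.out = out;
    ctx->internal->execute(ctx, s->color_balance, &td, NULL,
                           FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));

    if (in != out)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);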
 
static int color_balance16_p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr + 1)) / nb_jobs;
    const uint16_t *srcg = (const uint16_t *)in->data[0] + slice_start * in->linesize[0] / 2;
    const uint16_t *srcb = (const uint16_t *)in->data[1] + slice_start * in->linesize[1] / 2;
    const uint16_t *srcr = (const uint16_t *)in->data[2] + slice_start * in->linesize[2] / 2;
    const uint16_t *srca = (const uint16_t *)in->data[3] + slice_start * in->linesize[3] / 2;
    uint16_t *dstg = (uint16_t *)out->data[0] + slice_start * out->linesize[0] / 2;
    uint16_t *dstb = (uint16_t *)out->data[1] + slice_start * out->linesize[1] / 2;
    uint16_t *dstr = (uint16_t *)out->data[2] + slice_start * out->linesize[2] / 2;
    uint16_t *dsta = (uint16_t *)out->data[3] + slice_start * out->linesize[3] / 2;
    const int depth = s->depth;
    const float max = s->max;
    int i, j;

    for (i = slice_start; i < slice_end; i++) {
        for (j = 0; j < out->width; j++) {
            float r = srcr[j] / max;
            float g = srcg[j] / max;
            float b = srcb[j] / max;
            const float l = FFMAX3(r, g, b) + FFMIN3(r, g, b);

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dstr[j] = av_clip_uintp2_c(r * max, depth);
            dstg[j] = av_clip_uintp2_c(g * max, depth);
            dstb[j] = av_clip_uintp2_c(b * max, depth);
            if (in != out && out->linesize[3])
                dsta[j] = srca[j];
        }

        srcg += in->linesize[0] / 2;
        srcb += in->linesize[1] / 2;
        srcr += in->linesize[2] / 2;
        srca += in->linesize[3] / 2;
        dstg += out->linesize[0] / 2;
        dstb += out->linesize[1] / 2;
        dstr += out->linesize[2] / 2;
        dsta += out->linesize[3] / 2;
    }

    return 0;
}
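Two differences from the 8-bit planar worker: the plane pointers are uint16_t *, so every byte-denominated linesize is divided by 2, and stores clamp to the real bit depth with av_clip_uintp2_c(v, depth), since formats like GBRP10/12/14 leave headroom inside the 16-bit container. For reference, that helper clamps an int to [0, 2^p - 1] roughly as follows (mirroring libavutil's implementation):

/* clamp a to the unsigned p-bit range [0, (1 << p) - 1] */
static unsigned clip_uintp2(int a, int p)
{
    if (a & ~((1 << p) - 1))                /* any bit outside the range set? */
        return ~a >> 31 & ((1 << p) - 1);   /* negative -> 0, too big -> max  */
    else
        return a;
}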
 
static int color_balance8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    AVFilterLink *outlink = ctx->outputs[0];
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr + 1)) / nb_jobs;
    const uint8_t *srcrow = in->data[0] + slice_start * in->linesize[0];
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const uint8_t aoffset = s->rgba_map[A];
    const float max = s->max;
    const int step = s->step;
    uint8_t *dstrow;
    int i, j;

    dstrow = out->data[0] + slice_start * out->linesize[0];
    for (i = slice_start; i < slice_end; i++) {
        const uint8_t *src = srcrow;
        uint8_t *dst = dstrow;

        for (j = 0; j < outlink->w * step; j += step) {
            float r = src[j + roffset] / max;
            float g = src[j + goffset] / max;
            float b = src[j + boffset] / max;
            const float l = FFMAX3(r, g, b) + FFMIN3(r, g, b);

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dst[j + roffset] = av_clip_uint8(r * max);
            dst[j + goffset] = av_clip_uint8(g * max);
            dst[j + boffset] = av_clip_uint8(b * max);
            if (in != out && step == 4)
                dst[j + aoffset] = src[j + aoffset];
        }

        srcrow += in->linesize[0];
        dstrow += out->linesize[0];
    }

    return 0;
}
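The packed workers walk a single interleaved plane: step is the pixel stride and the rgba_map offsets (filled by ff_fill_rgba_map(), see config_output below) locate each component inside a pixel regardless of layout (RGBA, BGRA, ARGB, ...). A tiny standalone illustration, with assumed BGRA offsets:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* one BGRA row of 2 pixels; for AV_PIX_FMT_BGRA, ff_fill_rgba_map()
     * would yield B=0, G=1, R=2, A=3 with a 4-byte step */
    const uint8_t src[] = { 10, 20, 30, 255,  40, 50, 60, 255 };
    const int roffset = 2, goffset = 1, boffset = 0, step = 4;

    for (int j = 0; j < 2 * step; j += step)
        printf("pixel %d: r=%u g=%u b=%u\n", j / step,
               src[j + roffset], src[j + goffset], src[j + boffset]);
    return 0;
}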
 
static int color_balance16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    AVFilterLink *outlink = ctx->outputs[0];
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr + 1)) / nb_jobs;
    const uint16_t *srcrow = (const uint16_t *)in->data[0] + slice_start * in->linesize[0] / 2;
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const uint8_t aoffset = s->rgba_map[A];
    const int step = s->step / 2;
    const int depth = s->depth;
    const float max = s->max;
    uint16_t *dstrow;
    int i, j;

    dstrow = (uint16_t *)out->data[0] + slice_start * out->linesize[0] / 2;
    for (i = slice_start; i < slice_end; i++) {
        const uint16_t *src = srcrow;
        uint16_t *dst = dstrow;

        for (j = 0; j < outlink->w * step; j += step) {
            float r = src[j + roffset] / max;
            float g = src[j + goffset] / max;
            float b = src[j + boffset] / max;
            const float l = FFMAX3(r, g, b) + FFMIN3(r, g, b);

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dst[j + roffset] = av_clip_uintp2_c(r * max, depth);
            dst[j + goffset] = av_clip_uintp2_c(g * max, depth);
            dst[j + boffset] = av_clip_uintp2_c(b * max, depth);
            if (in != out && step == 4)
                dst[j + aoffset] = src[j + aoffset];
        }

        srcrow += in->linesize[0] / 2;
        dstrow += out->linesize[0] / 2;
    }

    return 0;
}
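Note that this worker halves s->step: config_output stores the step in bytes (see the sketch after it), but j here indexes uint16_t words, so e.g. RGBA64's 8-byte pixels give step = 4 words.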
 
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ColorBalanceContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    const int depth = desc->comp[0].depth;
    const int max = (1 << depth) - 1;
    const int planar = av_pix_fmt_count_planes(outlink->format) > 1;

    s->depth = depth;
    s->max = max;

    if (max == 255 && planar) {
        s->color_balance = color_balance8_p;
    } else if (planar) {
        s->color_balance = color_balance16_p;
    } else if (max == 255) {
        s->color_balance = color_balance8;
    } else {
        s->color_balance = color_balance16;
    }
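    /* Hedged reconstruction of the remainder (elided from this listing):
     * packed layouts also need the component order and the per-pixel byte
     * step; both helpers are cross-referenced by this file. */
    if (!planar) {
        ff_fill_rgba_map(s->rgba_map, outlink->format);
        s->step = av_get_padded_bits_per_pixel(desc) >> 3;
    }

    return 0;
}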
 
AVFilter ff_vf_colorbalance = {
    .name          = "colorbalance",
    .description   = NULL_IF_CONFIG_SMALL("Adjust the color balance."),
    .priv_size     = sizeof(ColorBalanceContext),
    .priv_class    = &colorbalance_class,
    .query_formats = query_formats,
    .inputs        = colorbalance_inputs,
    .outputs       = colorbalance_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = process_command,
};
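Because FLAGS includes AV_OPT_FLAG_RUNTIME_PARAM and the filter installs process_command, every balance knob can be changed mid-stream (e.g. via the sendcmd filter), and AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC adds the generic enable= expression. A typical invocation such as ffmpeg -i in.mp4 -vf "colorbalance=rs=.3:pl=1" out.mp4 pushes the shadows toward red while preserving lightness.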
 
  