   63 #define OFFSET(x) offsetof(DnnDetectContext, dnnctx.x) 
   64 #define OFFSET2(x) offsetof(DnnDetectContext, x) 
   65 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM 
   68 #if (CONFIG_LIBTENSORFLOW == 1) 
   71 #if (CONFIG_LIBOPENVINO == 1) 
   77         { "ssd",     "output shape [1, 1, N, 7]",  0,                        AV_OPT_TYPE_CONST,       { .i64 = DDMT_SSD },      0, 0, FLAGS, .unit = "model_type" },
   78         { "yolo",    "output shape [1, N*Cx*Cy*DetectionBox]",  0,           AV_OPT_TYPE_CONST,       { .i64 = DDMT_YOLOV1V2 }, 0, 0, FLAGS, .unit = "model_type" },
   79         { "yolov3",  "outputs shape [1, N*D, Cx, Cy]",  0,                   AV_OPT_TYPE_CONST,       { .i64 = DDMT_YOLOV3 },   0, 0, FLAGS, .unit = "model_type" },
   80         { "yolov4",  "outputs shape [1, N*D, Cx, Cy]",  0,                   AV_OPT_TYPE_CONST,       { .i64 = DDMT_YOLOV4 },   0, 0, FLAGS, .unit = "model_type" },
 
   91     return 1.f / (1.f + exp(-x));

   94 static inline float linear(float x) {
 
  102     for (int i = 0; i < nb_classes; i++) {
  103         if (label_data[i * cell_size] > max_prob) {
  104             max_prob = label_data[i * cell_size];
 
  114     float overlapping_width = FFMIN(bbox1->x + bbox1->w, bbox2->x + bbox2->w) - FFMAX(bbox1->x, bbox2->x);
  115     float overlapping_height = FFMIN(bbox1->y + bbox1->h, bbox2->y + bbox2->h) - FFMAX(bbox1->y, bbox2->y);
  116     float intersection_area =
  117         (overlapping_width < 0 || overlapping_height < 0) ? 0 : overlapping_height * overlapping_width;
  118     float union_area = bbox1->w * bbox1->h + bbox2->w * bbox2->h - intersection_area;
  119     return intersection_area / union_area;
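/* Added worked example (hypothetical values, not in the file):
 * bbox1 = {x=0, y=0, w=10, h=10}, bbox2 = {x=5, y=5, w=10, h=10}
 *   overlapping_width  = FFMIN(10, 15) - FFMAX(0, 5) = 5
 *   overlapping_height = FFMIN(10, 15) - FFMAX(0, 5) = 5
 *   intersection_area  = 25, union_area = 100 + 100 - 25 = 175
 *   IOU = 25 / 175 ~= 0.14 -- below typical NMS thresholds, so neither
 *   box would suppress the other. */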
 
  126     float conf_threshold = ctx->confidence;
 
  127     int detection_boxes, box_size;
 
  128     int cell_w = 0, cell_h = 0, scale_w = 0, scale_h = 0;
 
  129     int nb_classes = ctx->nb_classes;

  131     float *anchors = ctx->anchors;
 
  137         cell_w = ctx->cell_w;
  138         cell_h = ctx->cell_h;
 
  142         if (output[output_index].dims[2] != output[output_index].dims[3] &&
  143             output[output_index].dims[2] == output[output_index].dims[1]) {

  145             cell_w = output[output_index].dims[2];
  146             cell_h = output[output_index].dims[1];

  148             cell_w = output[output_index].dims[3];
  149             cell_h = output[output_index].dims[2];
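            /* Added note (interpretation, not in the file): this looks like a
             * tensor-layout heuristic. When dims[1] == dims[2] but
             * dims[2] != dims[3], the cell grid is taken to live in
             * dims[1]/dims[2] (NHWC-style, [N, Cy, Cx, D]); otherwise it is
             * read from dims[2]/dims[3] (NCHW-style, matching the
             * "outputs shape [1, N*D, Cx, Cy]" option text above). */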
 
  151         scale_w = ctx->scale_width;
  152         scale_h = ctx->scale_height;
 
  154     box_size = nb_classes + 5;
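    /* Added note: each box occupies box_size = nb_classes + 5 floats --
     * x, y, w, h and an objectness confidence, followed by one raw score
     * per class (see the [4] and [label_id + 5] indexing below). */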
 
  156     switch (ctx->model_type) {

  159         post_process_raw_data = linear;

  162         post_process_raw_data = sigmoid;
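        /* Added sketch (not in the file): post_process_raw_data is applied
         * to raw network outputs before thresholding. Hypothetical use:
         *
         *     float (*post)(float) = sigmoid;  // DDMT_YOLOV4 path
         *     float conf = post(2.0f);         // 1/(1 + exp(-2)) ~= 0.88
         *
         * YOLOv1/v2/v3 outputs pass through linear() unchanged. */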
 
  166     if (!cell_h || !cell_w) {
 
  176     if (output[output_index].dims[1] * output[output_index].dims[2] *
  177             output[output_index].dims[3] % (box_size * cell_w * cell_h)) {
 
  181     detection_boxes = output[output_index].dims[1] *
  182                       output[output_index].dims[2] *
  183                       output[output_index].dims[3] / box_size / cell_w / cell_h;
 
  185     anchors = anchors + (detection_boxes * output_index * 2);
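    /* Added worked example (hypothetical, typical YOLOv3 COCO head):
     * nb_classes = 80 gives box_size = 85; an output of shape
     * [1, 255, 13, 13] gives cell_w = cell_h = 13, so
     *     detection_boxes = 255 * 13 * 13 / 85 / 13 / 13 = 3
     * anchor boxes per cell, and output_index advances ctx->anchors by
     * two anchor values per box for this scale. */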
 
  191     for (int box_id = 0; box_id < detection_boxes; box_id++) {
  192         for (int cx = 0; cx < cell_w; cx++)
  193             for (int cy = 0; cy < cell_h; cy++) {
  194                 float x, y, w, h, conf;
 
  195                 float *detection_boxes_data;
 
  200                         ((cy * cell_w + cx) * detection_boxes + box_id) * box_size;
 
  201                     conf = post_process_raw_data(detection_boxes_data[4]);
 
  203                     detection_boxes_data = output_data + box_id * box_size * cell_w * cell_h;
  204                     conf = post_process_raw_data(
  205                                 detection_boxes_data[cy * cell_w + cx + 4 * cell_w * cell_h]);
 
  209                     x = post_process_raw_data(detection_boxes_data[0]);
 
  210                     y = post_process_raw_data(detection_boxes_data[1]);
 
  211                     w = detection_boxes_data[2];
 
  212                     h = detection_boxes_data[3];
 
  214                     conf = conf * post_process_raw_data(detection_boxes_data[label_id + 5]);
 
  216                     x = post_process_raw_data(detection_boxes_data[cy * cell_w + cx]);
 
  217                     y = post_process_raw_data(detection_boxes_data[cy * cell_w + cx + cell_w * cell_h]);
 
  218                     w = detection_boxes_data[cy * cell_w + cx + 2 * cell_w * cell_h];
 
  219                     h = detection_boxes_data[cy * cell_w + cx + 3 * cell_w * cell_h];
 
  221                         detection_boxes_data + cy * cell_w + cx + 5 * cell_w * cell_h);
 
  222                     conf = conf * post_process_raw_data(
  223                                 detection_boxes_data[cy * cell_w + cx + (label_id + 5) * cell_w * cell_h]);
 
  225                 if (conf < conf_threshold) {
 
  233                 bbox->w = exp(w) * anchors[box_id * 2] * frame->width / scale_w;
  234                 bbox->h = exp(h) * anchors[box_id * 2 + 1] * frame->height / scale_h;
  235                 bbox->x = (cx + x) / cell_w * frame->width - bbox->w / 2;
  236                 bbox->y = (cy + y) / cell_h * frame->height - bbox->h / 2;
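                /* Added note (commentary, not in the file): x and y are
                 * cell-relative center offsets, so (cx + x) / cell_w and
                 * (cy + y) / cell_h are image-normalized coordinates scaled
                 * to pixels via frame->width/height; w and h are decoded
                 * YOLO-style as exp(raw) times the per-box anchor,
                 * normalized by the network input size (scale_w/scale_h).
                 * Subtracting w/2 and h/2 turns the box center into the
                 * top-left corner that AVDetectionBBox stores. */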
 
  238                 if (ctx->labels && label_id < ctx->label_count) {
 
  257     float conf_threshold = ctx->confidence;
 
  295             memcpy(bbox, candidate_bbox, sizeof(*bbox));
 
  319     for (int i = 0; i < nb_outputs; i++) {
 
  334     float conf_threshold = ctx->confidence;
 
  335     int proposal_count = 0;
 
  337     float *detections = NULL, *labels = NULL;
 
  341     int scale_w = ctx->scale_width;
  342     int scale_h = ctx->scale_height;
 
  344     if (nb_outputs == 1 && output->dims[3] == 7) {
  345         proposal_count = output->dims[2];
  346         detect_size = output->dims[3];
  347         detections = output->data;
  348     } else if (nb_outputs == 2 && output[0].dims[3] == 5) {
  349         proposal_count = output[0].dims[2];
  350         detect_size = output[0].dims[3];
  351         detections = output[0].data;

  353     } else if (nb_outputs == 2 && output[1].dims[3] == 5) {
  354         proposal_count = output[1].dims[2];
  355         detect_size = output[1].dims[3];
  356         detections = output[1].data;
 
  363     if (proposal_count == 0)
 
  366     for (int i = 0; i < proposal_count; ++i) {

  369             conf = detections[i * detect_size + 2];

  371             conf = detections[i * detect_size + 4];
 
  372         if (conf < conf_threshold) {
 
  378     if (nb_bboxes == 0) {
 
  391     for (int i = 0; i < proposal_count; ++i) {
  392         av_unused int image_id = (int)detections[i * detect_size + 0];
 
  394         float conf, x0, y0, x1, y1;
 
  396         if (nb_outputs == 1) {
 
  397             label_id = (int)detections[i * detect_size + 1];
  398             conf = detections[i * detect_size + 2];
  399             x0   = detections[i * detect_size + 3];
  400             y0   = detections[i * detect_size + 4];
  401             x1   = detections[i * detect_size + 5];
  402             y1   = detections[i * detect_size + 6];
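            /* Added note: with the single [1, 1, N, 7] output each row is
             * [image_id, label, conf, x0, y0, x1, y1], corner coordinates
             * normalized to [0, 1] -- hence the direct indices 0..6 here
             * and the frame->width/height scaling below. */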
 
  404             label_id = (int)labels[i];
  405             x0   = detections[i * detect_size] / scale_w;
  406             y0   = detections[i * detect_size + 1] / scale_h;
  407             x1   = detections[i * detect_size + 2] / scale_w;
  408             y1   = detections[i * detect_size + 3] / scale_h;
  409             conf = detections[i * detect_size + 4];
 
  412         if (conf < conf_threshold) {
 
  417         bbox->x = (int)(x0 * frame->width);
  418         bbox->w = (int)(x1 * frame->width) - bbox->x;
  419         bbox->y = (int)(y0 * frame->height);
  420         bbox->h = (int)(y1 * frame->height) - bbox->y;
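        /* Added worked example (hypothetical): on a 1920x1080 frame,
         * x0 = 0.25, y0 = 0.10, x1 = 0.75, y1 = 0.90 gives
         *     bbox->x = 480,  bbox->w = 1440 - 480 = 960,
         *     bbox->y = 108,  bbox->h = 972 - 108 = 864. */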
 
  425         if (ctx->labels && label_id < ctx->label_count) {
 
  432         if (nb_bboxes == 0) {
 
  452     switch (ctx->model_type) {
 
  477     float conf_threshold = ctx->confidence;
 
  478     float *conf, *position, *label_id, x0, y0, x1, y1;
 
  486     position       = output[3].data;
  487     label_id       = output[2].data;
 
  495     for (int i = 0; i < proposal_count; ++i) {
  496         if (conf[i] < conf_threshold)
 
  501     if (nb_bboxes == 0) {
 
  514     for (int i = 0; i < proposal_count; ++i) {
  515         y0 = position[i * 4];
  516         x0 = position[i * 4 + 1];
  517         y1 = position[i * 4 + 2];
  518         x1 = position[i * 4 + 3];
 
  522         if (conf[i] < conf_threshold) {
 
  526         bbox->x = (int)(x0 * frame->width);
  527         bbox->w = (int)(x1 * frame->width) - bbox->x;
  528         bbox->y = (int)(y0 * frame->height);
  529         bbox->h = (int)(y1 * frame->height) - bbox->y;
 
  534         if (ctx->labels && label_id[i] < ctx->label_count) {
 
  541         if (nb_bboxes == 0) {
 
  565     for (int i = 0; i < ctx->label_count; i++) {
 
  568     ctx->label_count = 0;
 
  584     while (!feof(file)) {
 
  587         if (!fgets(buf, 256, file)) {
 
  591         line_len = strlen(buf);
 
  593             int i = line_len - 1;
 
  594             if (buf[i] == '\n' || buf[i] == '\r' || buf[i] == ' ') {
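            /* Added note: the loop reads one label per line with fgets()
             * and walks back from the end of the buffer to strip trailing
             * '\n', '\r' and spaces; each cleaned, non-empty line then
             * becomes one entry of ctx->labels (the storage code is elided
             * from this excerpt). */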
 
  632     switch(backend_type) {
 
  634         if (output_nb != 4) {
 
  636                                        but get %d instead\n", output_nb);
 
  658     if (using_yolo && !ctx->anchors) {
 
  670     if (!ctx->bboxes_fifo)
 
  674     if (ctx->labels_filename) {
 
  713                 *out_pts = in_frame->pts + pts;
 
  782     if (ctx->bboxes_fifo) {
 
  798     int ret, width_idx, height_idx;
 
  807     ctx->scale_width = model_input.dims[width_idx] == -1 ? inlink->w :
  808         model_input.dims[width_idx];
  809     ctx->scale_height = model_input.dims[height_idx] == -1 ? inlink->h :
  810         model_input.dims[height_idx];
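    /* Added note: a model input dimension of -1 denotes a dynamic size, so
     * the filter falls back to the input link's w/h; otherwise the fixed
     * model input size becomes the scale reference (scale_width/height)
     * used when mapping detections back to frame pixels. */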
 
  824     .p.name        = "dnn_detect",
 
  826     .p.priv_class  = &dnn_detect_class,
 
  