36 #include <OpenGL/gl3.h>
43 #if HAVE_GLXGETPROCADDRESS
76 #define FF_GL_RED_COMPONENT GL_RED
77 #elif defined(GL_LUMINANCE)
78 #define FF_GL_RED_COMPONENT GL_LUMINANCE
80 #define FF_GL_RED_COMPONENT 0x1903 // GL_RED
84 #define FF_GL_UNSIGNED_BYTE_3_3_2 0x8032
85 #define FF_GL_UNSIGNED_BYTE_2_3_3_REV 0x8362
86 #define FF_GL_UNSIGNED_SHORT_1_5_5_5_REV 0x8366
87 #define FF_GL_UNPACK_ROW_LENGTH 0x0CF2
90 #define FF_GL_ARRAY_BUFFER 0x8892
91 #define FF_GL_ELEMENT_ARRAY_BUFFER 0x8893
92 #define FF_GL_STATIC_DRAW 0x88E4
93 #define FF_GL_FRAGMENT_SHADER 0x8B30
94 #define FF_GL_VERTEX_SHADER 0x8B31
95 #define FF_GL_COMPILE_STATUS 0x8B81
96 #define FF_GL_LINK_STATUS 0x8B82
97 #define FF_GL_INFO_LOG_LENGTH 0x8B84
152 #define OPENGL_ERROR_CHECK(ctx) \
155 if ((err_code = glGetError()) != GL_NO_ERROR) { \
156 av_log(ctx, AV_LOG_ERROR, "OpenGL error occurred in '%s', line %d: %d\n", __func__, __LINE__, err_code); \
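The listing elides the rest of this macro; a plausible completion and a typical call site are sketched below for clarity (the err_code declaration and the fail label are assumptions, not taken from the lines above):

    #define OPENGL_ERROR_CHECK(ctx) \
    { \
        GLenum err_code; \
        if ((err_code = glGetError()) != GL_NO_ERROR) { \
            av_log(ctx, AV_LOG_ERROR, "OpenGL error occurred in '%s', line %d: %d\n", \
                   __func__, __LINE__, err_code); \
            goto fail; \
        } \
    }

    /* typical use: immediately after a sequence of GL calls */
    glBindTexture(GL_TEXTURE_2D, texture);
    OPENGL_ERROR_CHECK(opengl);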
179 SDL_GLContext glcontext;
355 while (SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT) > 0) {
356 switch (event.type) {
360 switch (event.key.keysym.sym) {
366 case SDL_WINDOWEVENT:
367 switch(event.window.event) {
368 case SDL_WINDOWEVENT_RESIZED:
369 case SDL_WINDOWEVENT_SIZE_CHANGED:
370 SDL_GL_GetDrawableSize(opengl->window, &message.width, &message.height);
384 if (SDL_Init(SDL_INIT_VIDEO)) {
389 SDL_WINDOWPOS_UNDEFINED,
390 SDL_WINDOWPOS_UNDEFINED,
392 SDL_WINDOW_RESIZABLE | SDL_WINDOW_OPENGL);
393 if (!opengl->window) {
394 av_log(opengl, AV_LOG_ERROR, "Unable to create default window: %s\n", SDL_GetError());
397 opengl->glcontext = SDL_GL_CreateContext(opengl->window);
398 if (!opengl->glcontext) {
399 av_log(opengl, AV_LOG_ERROR, "Unable to create OpenGL context on default window: %s\n", SDL_GetError());
402 SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
403 SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
404 SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
405 SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8);
406 SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
408 SDL_GL_GetDrawableSize(opengl->window, &message.width, &message.height);
416 #define LOAD_OPENGL_FUN(name, type) \
417 procs->name = (type)SDL_GL_GetProcAddress(#name); \
418 if (!procs->name) { \
419 av_log(opengl, AV_LOG_ERROR, "Cannot load OpenGL function: '%s'\n", #name); \
420 return AVERROR(ENOSYS); \
451 #undef LOAD_OPENGL_FUN
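For illustration, a hedged sketch of how LOAD_OPENGL_FUN is invoked between its definition and the #undef above, filling the FFOpenGLFunctions table (only a few representative entries are shown; the actual list in the file is longer):

    FFOpenGLFunctions *procs = &opengl->glprocs;
    LOAD_OPENGL_FUN(glCreateProgram, FF_PFNGLCREATEPROGRAMPROC)
    LOAD_OPENGL_FUN(glCreateShader,  FF_PFNGLCREATESHADERPROC)
    LOAD_OPENGL_FUN(glShaderSource,  FF_PFNGLSHADERSOURCEPROC)
    LOAD_OPENGL_FUN(glCompileShader, FF_PFNGLCOMPILESHADERPROC)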
455 #if defined(__APPLE__)
462 return opengl_sdl_load_procedures(opengl);
497 #if HAVE_GLXGETPROCADDRESS
498 #define SelectedGetProcAddress glXGetProcAddress
499 #elif HAVE_WGLGETPROCADDRESS
500 #define SelectedGetProcAddress wglGetProcAddress
503 #define LOAD_OPENGL_FUN(name, type) \
504 procs->name = (type)SelectedGetProcAddress(#name); \
505 if (!procs->name) { \
506 av_log(opengl, AV_LOG_ERROR, "Cannot load OpenGL function: '%s'\n", #name); \
507 return AVERROR(ENOSYS); \
512 return opengl_sdl_load_procedures(opengl);
543 #undef SelectedGetProcAddress
544 #undef LOAD_OPENGL_FUN
550 memset(matrix, 0, 16 * sizeof(float));
555 float bottom, float top, float nearZ, float farZ)
557 float ral = right + left;
558 float rsl = right - left;
559 float tab = top + bottom;
560 float tsb = top - bottom;
561 float fan = farZ + nearZ;
562 float fsn = farZ - nearZ;
564 memset(matrix, 0, 16 * sizeof(float));
578 const char *extension;
581 } required_extensions[] = {
582 { "GL_ARB_multitexture", 1, 3 },
583 { "GL_ARB_vertex_buffer_object", 1, 5 },
584 { "GL_ARB_vertex_shader", 2, 0 },
585 { "GL_ARB_fragment_shader", 2, 0 },
586 { "GL_ARB_shader_objects", 2, 0 },
590 const char *extensions, *version;
592 version = glGetString(GL_VERSION);
593 extensions = glGetString(GL_EXTENSIONS);
600 if (sscanf(version, "%d.%d", &major, &minor) != 2)
603 for (i = 0; required_extensions[i].extension; i++) {
604 if ((major < required_extensions[i].major ||
605 (major == required_extensions[i].major && minor < required_extensions[i].minor)) &&
606 !strstr(extensions, required_extensions[i].extension)) {
608 required_extensions[i].extension);
615 opengl->non_pow_2_textures = major >= 2 || strstr(extensions, "GL_ARB_texture_non_power_of_two");
616 #if defined(GL_ES_VERSION_2_0)
617 opengl->unpack_subimage = !!strstr(extensions, "GL_EXT_unpack_subimage");
647 case GL_UNSIGNED_SHORT:
649 case GL_UNSIGNED_SHORT_5_6_5:
651 case GL_UNSIGNED_BYTE:
697 int *out_width, int *out_height)
700 *out_width = in_width;
701 *out_height = in_height;
704 unsigned power_of_2 = 1;
705 while (power_of_2 < max)
707 *out_height = power_of_2;
708 *out_width = power_of_2;
710 in_width, in_height, *out_width, *out_height);
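Put together, the fragment above rounds the texture size up to the next power of two whenever non-power-of-two textures are unsupported. A simplified standalone sketch of the same logic (the real opengl_get_texture_size takes the OpenGLContext and checks opengl->non_pow_2_textures):

    static void get_texture_size_sketch(int non_pow_2_supported,
                                        int in_width, int in_height,
                                        int *out_width, int *out_height)
    {
        if (non_pow_2_supported) {
            *out_width  = in_width;
            *out_height = in_height;
        } else {
            unsigned power_of_2 = 1;
            int max = in_width > in_height ? in_width : in_height;
            while (power_of_2 < max)
                power_of_2 *= 2;   /* round up to the next power of two */
            *out_width  = power_of_2;
            *out_height = power_of_2;
        }
    }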
731 #define FILL_COMPONENT(i) { \
732 shift = (desc->comp[i].depth - 1) >> 3; \
733 opengl->color_map[(i << 2) + (desc->comp[i].offset >> shift)] = 1.0; \
743 #undef FILL_COMPONENT
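As a worked example of FILL_COMPONENT, assuming the usual packed 8-bit layouts: for AV_PIX_FMT_RGBA (depth 8, byte offsets 0, 1, 2, 3) the shift is (8 - 1) >> 3 = 0, so each component i sets color_map[(i << 2) + offset] = 1.0 and the 4x4 swizzle matrix comes out as the identity; for AV_PIX_FMT_BGRA the offsets 2, 1, 0, 3 swap the red and blue rows instead. For 16-bit formats the shift becomes 1, halving the byte offsets back into component indices.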
782 if (!fragment_shader_code) {
795 fragment_shader_code);
850 int new_width, new_height;
852 glBindTexture(GL_TEXTURE_2D, texture);
853 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
854 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
855 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
856 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
857 glTexImage2D(GL_TEXTURE_2D, 0, opengl->format, new_width, new_height, 0,
917 for (i = 0; i < 4; i++)
943 if ((ret = opengl_sdl_create_window(h)) < 0) {
948 av_log(opengl, AV_LOG_ERROR, "FFmpeg is compiled without SDL. Cannot create default window.\n");
975 SDL_GL_DeleteContext(opengl->glcontext);
976 SDL_DestroyWindow(opengl->window);
1020 if (desc->nb_components > 1) {
1022 int num_planes = desc->nb_components - (has_alpha ? 1 : 0);
1030 for (i = 1; i < num_planes; i++)
1046 glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
1049 (float)opengl->background[2] / 255.0f, 1.0f);
1068 "The opengl output device is deprecated due to being fundamentally incompatible with libavformat API. "
1069 "For monitoring purposes in ffmpeg you can output to a file or use pipes and a video player.\n"
1070 "Example: ffmpeg -i INPUT -f nut -c:v rawvideo - | ffplay -loglevel warning -vf setpts=0 -\n"
1075 if (h->nb_streams != 1 ||
1078 av_log(opengl, AV_LOG_ERROR, "Only a single raw or wrapped avframe video stream is supported.\n");
1118 glClear(GL_COLOR_BUFFER_BIT);
1122 SDL_GL_SwapWindow(opengl->window);
1148 int plane = desc->comp[comp_index].plane;
1158 data += width_chroma * height_chroma * wordsize;
1162 data += 2 * width_chroma * height_chroma * wordsize;
1170 #define LOAD_TEXTURE_DATA(comp_index, sub) \
1172 int width = sub ? AV_CEIL_RSHIFT(opengl->width, desc->log2_chroma_w) : opengl->width; \
1173 int height = sub ? AV_CEIL_RSHIFT(opengl->height, desc->log2_chroma_h): opengl->height; \
1175 int plane = desc->comp[comp_index].plane; \
1177 glBindTexture(GL_TEXTURE_2D, opengl->texture_name[comp_index]); \
1179 GLint length = ((AVFrame *)input)->linesize[plane]; \
1180 int bytes_per_pixel = opengl_type_size(opengl->type); \
1181 if (!(desc->flags & AV_PIX_FMT_FLAG_PLANAR)) \
1182 bytes_per_pixel *= desc->nb_components; \
1183 data = ((AVFrame *)input)->data[plane]; \
1184 if (!(length % bytes_per_pixel) && \
1185 (opengl->unpack_subimage || ((length / bytes_per_pixel) == width))) { \
1186 length /= bytes_per_pixel; \
1187 if (length != width) \
1188 glPixelStorei(FF_GL_UNPACK_ROW_LENGTH, length); \
1189 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, \
1190 opengl->format, opengl->type, data); \
1191 if (length != width) \
1192 glPixelStorei(FF_GL_UNPACK_ROW_LENGTH, 0); \
1195 for (h = 0; h < height; h++) { \
1196 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, h, width, 1, \
1197 opengl->format, opengl->type, data); \
1202 data = opengl_get_plane_pointer(opengl, input, comp_index, desc); \
1203 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, \
1204 opengl->format, opengl->type, data); \
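The fast path of the macro above relies on FF_GL_UNPACK_ROW_LENGTH so that an AVFrame whose linesize is padded beyond the visible width can still be uploaded with a single call. A minimal standalone sketch of that pattern, assuming an 8-bit single-component plane (so linesize is already in pixels) and that texture, width, height and frame are set up as in the surrounding code:

    GLint row_length = frame->linesize[0];
    glBindTexture(GL_TEXTURE_2D, texture);
    if (row_length != width)
        glPixelStorei(FF_GL_UNPACK_ROW_LENGTH, row_length);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                    FF_GL_RED_COMPONENT, GL_UNSIGNED_BYTE, frame->data[0]);
    if (row_length != width)
        glPixelStorei(FF_GL_UNPACK_ROW_LENGTH, 0); /* restore the default */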
1217 if (opengl->glcontext)
1218 SDL_GL_MakeCurrent(opengl->window, opengl->glcontext);
1220 if (!opengl->no_window && (ret = opengl_sdl_process_events(h)) < 0)
1229 glClear(GL_COLOR_BUFFER_BIT);
1233 glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
1262 SDL_GL_SwapWindow(opengl->window);
1294 #define OFFSET(x) offsetof(OpenGLContext, x)
1295 #define ENC AV_OPT_FLAG_ENCODING_PARAM
#define AV_PIX_FMT_YUVA422P16
GLuint vertex_buffer
Vertex buffer.
#define AV_PIX_FMT_GBRAP16
#define AV_LOG_WARNING
Something somehow does not look correct.
static const char *const FF_OPENGL_FRAGMENT_SHADER_RGBA_PACKET
Fragment shader for packed RGBA formats.
AVPixelFormat
Pixel format.
void(APIENTRY * FF_PFNGLSHADERSOURCEPROC)(GLuint shader, GLsizei count, const char **string, const GLint *length)
static const AVOption options[]
GLuint(APIENTRY * FF_PFNGLCREATESHADERPROC)(GLenum type)
enum AVMediaType codec_type
General type of the encoded data.
const FFOutputFormat ff_opengl_muxer
This struct describes the properties of an encoded stream.
GLint chroma_div_h_location
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
FF_PFNGLGETPROGRAMIVPROC glGetProgramiv
GLint texture_location[4]
static av_cold int opengl_init_context(OpenGLContext *opengl)
@ AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER
Prepare window buffer message.
This structure describes decoded (raw) audio or video data.
static int opengl_control_message(AVFormatContext *h, int type, void *data, size_t data_size)
#define AV_PIX_FMT_YUVA420P16
static const char * opengl_get_fragment_shader_code(enum AVPixelFormat format)
GLint max_viewport_width
Maximum viewport size.
int avdevice_dev_to_app_control_message(struct AVFormatContext *s, enum AVDevToAppMessageType type, void *data, size_t data_size)
Send control message from device to application.
static const char *const FF_OPENGL_VERTEX_SHADER
static const GLushort g_index[6]
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
GLuint vertex_shader
Vertex shader.
int no_window
Set to 0 to create the default window.
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
void(APIENTRY * FF_PFNGLDELETEBUFFERSPROC)(GLsizei n, const GLuint *buffers)
void(APIENTRY * FF_PFNGLATTACHSHADERPROC)(GLuint program, GLuint shader)
GLfloat color_map[16]
RGBA color map matrix.
static av_cold GLuint opengl_load_shader(OpenGLContext *opengl, GLenum type, const char *source)
@ AV_APP_TO_DEV_WINDOW_REPAINT
Repaint request message.
static const char *const FF_OPENGL_FRAGMENT_SHADER_GRAY
void(APIENTRY * FF_PFNGLCOMPILESHADERPROC)(GLuint shader)
void(APIENTRY * FF_PFNGLGETSHADERIVPROC)(GLuint shader, GLenum pname, GLint *params)
#define FF_GL_UNSIGNED_SHORT_1_5_5_5_REV
GLfloat chroma_div_h
Chroma subsampling h ratio.
#define FF_GL_UNSIGNED_BYTE_2_3_3_REV
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
void(APIENTRY * FF_PFNGLUNIFORM1IPROC)(GLint location, GLint v0)
FF_PFNGLCREATEPROGRAMPROC glCreateProgram
#define AV_PIX_FMT_YUVA444P16
static void opengl_compute_display_area(AVFormatContext *s)
GLuint texture_name[4]
Textures' IDs.
#define AV_PIX_FMT_GRAY16
static int opengl_write_frame(AVFormatContext *h, int stream_index, AVFrame **frame, unsigned flags)
static av_cold int opengl_configure_texture(OpenGLContext *opengl, GLuint texture, GLsizei width, GLsizei height)
FF_PFNGLUNIFORMMATRIX4FVPROC glUniformMatrix4fv
void(APIENTRY * FF_PFNGLGETSHADERINFOLOGPROC)(GLuint shader, GLsizei bufSize, GLsizei *length, char *infoLog)
GLuint fragment_shader
Fragment shader for the current pix_fmt.
FF_PFNGLCREATESHADERPROC glCreateShader
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
GLint texture_coords_attrib
static int opengl_prepare(OpenGLContext *opengl)
#define FF_ARRAY_ELEMS(a)
static av_cold void opengl_get_texture_size(OpenGLContext *opengl, int in_width, int in_height, int *out_width, int *out_height)
#define AV_PIX_FMT_YUV422P16
int picture_height
Rendered height.
static void opengl_make_ortho(float matrix[16], float left, float right, float bottom, float top, float nearZ, float farZ)
@ AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER
Display window buffer message.
GLint position_attrib
Attributes' locations.
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#define LOAD_OPENGL_FUN(name, type)
#define AV_PIX_FMT_YUV444P16
#define AV_CEIL_RSHIFT(a, b)
int unpack_subimage
1 when GL_EXT_unpack_subimage is available
FF_PFNGLUNIFORM1IPROC glUniform1i
void(APIENTRY * FF_PFNGLBINDBUFFERPROC)(GLenum target, GLuint buffer)
@ AV_DEV_TO_APP_CREATE_WINDOW_BUFFER
Create window buffer message.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static int opengl_write_packet(AVFormatContext *h, AVPacket *pkt)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_PIX_FMT_YUV420P16
static void opengl_make_identity(float matrix[16])
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
static const char *const FF_OPENGL_FRAGMENT_SHADER_RGB_PLANAR
Fragment shader for planar RGB formats.
void(APIENTRY * FF_PFNGLBUFFERDATAPROC)(GLenum target, ptrdiff_t size, const GLvoid *data, GLenum usage)
#define FF_GL_FRAGMENT_SHADER
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AV_CODEC_ID_WRAPPED_AVFRAME
Passthrough codec, AVFrames wrapped in AVPacket.
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
static const struct OpenGLFormatDesc opengl_format_desc[]
void(APIENTRY * FF_PFNGLUNIFORM1FPROC)(GLint location, GLfloat v0)
GLint max_viewport_height
Maximum viewport size.
#define AV_PIX_FMT_GBRP16
AVCodecParameters * codecpar
Codec parameters associated with this stream.
char * window_title
Title of the window.
#define AV_PIX_FMT_RGBA64
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
void(APIENTRY * FF_PFNGLVERTEXATTRIBPOINTERPROC)(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, uintptr_t pointer)
FF_PFNGLGETATTRIBLOCATIONPROC glGetAttribLocation
void(APIENTRY * FF_PFNGLENABLEVERTEXATTRIBARRAYPROC)(GLuint index)
static av_cold int opengl_read_limits(AVFormatContext *h)
Rational number (pair of numerator and denominator).
#define FF_GL_VERTEX_SHADER
GLuint program
Shader program.
@ AV_OPT_TYPE_COLOR
Underlying C type is uint8_t[4].
@ AV_OPT_TYPE_IMAGE_SIZE
Underlying C type is two consecutive integers.
const char * av_default_item_name(void *ptr)
Return the context name.
static const char *const FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR
Fragment shader for planar YUV formats.
@ AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT
static const char *const FF_OPENGL_FRAGMENT_SHADER_RGBA_PLANAR
Fragment shader for planar RGBA formats.
enum AVPixelFormat pix_fmt
Stream pixel format.
FF_PFNGLGETSHADERINFOLOGPROC glGetShaderInfoLog
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
static av_cold void opengl_fill_color_map(OpenGLContext *opengl)
void(APIENTRY * FF_PFNGLDELETESHADERPROC)(GLuint shader)
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
void(APIENTRY * FF_PFNGLGETPROGRAMIVPROC)(GLuint program, GLenum pname, GLint *params)
static av_cold int opengl_compile_shaders(OpenGLContext *opengl, enum AVPixelFormat pix_fmt)
#define OPENGL_ERROR_CHECK(ctx)
static const char *const FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET
Fragment shader for packed RGB formats.
static av_cold void opengl_deinit_context(OpenGLContext *opengl)
FF_PFNGLUNIFORM1FPROC glUniform1f
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
static av_cold int opengl_prepare_vertex(AVFormatContext *s)
static int av_cold opengl_load_procedures(OpenGLContext *opengl)
static int opengl_draw(AVFormatContext *h, void *input, int repaint, int is_pkt)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
#define FF_GL_COMPILE_STATUS
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
FF_PFNGLCOMPILESHADERPROC glCompileShader
FFOpenGLFunctions glprocs
#define FF_GL_STATIC_DRAW
#define FF_GL_ELEMENT_ARRAY_BUFFER
void(APIENTRY * FF_PFNGLUSEPROGRAMPROC)(GLuint program)
#define AV_PIX_FMT_BGR555
static uint8_t * opengl_get_plane_pointer(OpenGLContext *opengl, AVPacket *pkt, int comp_index, const AVPixFmtDescriptor *desc)
GLfloat chroma_div_w
Chroma subsampling w ratio.
GLint projection_matrix_location
Uniforms' locations.
FF_PFNGLLINKPROGRAMPROC glLinkProgram
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
int picture_width
Rendered width.
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
#define AVERROR_EXTERNAL
Generic error in an external library.
GLuint index_buffer
Index buffer.
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
FF_PFNGLDELETEPROGRAMPROC glDeleteProgram
#define FF_GL_LINK_STATUS
#define AV_LOG_INFO
Standard information.
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
void(APIENTRY * FF_PFNGLGETPROGRAMINFOLOGPROC)(GLuint program, GLsizei bufSize, GLsizei *length, char *infoLog)
FF_PFNGLDELETESHADERPROC glDeleteShader
static int opengl_release_window(AVFormatContext *h)
#define AV_PIX_FMT_BGRA64
static int opengl_create_window(AVFormatContext *h)
uint8_t background[4]
Background color.
static const AVClass opengl_class
static av_cold void opengl_get_texture_params(OpenGLContext *opengl)
#define LOAD_TEXTURE_DATA(comp_index, sub)
#define AV_PIX_FMT_RGB555
int inited
Set to 1 when write_header was successfully called.
FF_PFNGLGENBUFFERSPROC glGenBuffers
#define FF_GL_ARRAY_BUFFER
#define AV_PIX_FMT_BGR565
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
FF_PFNGLATTACHSHADERPROC glAttachShader
GLint model_view_matrix_location
#define AV_PIX_FMT_RGB565
FF_PFNGLBINDBUFFERPROC glBindBuffer
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
@ AV_APP_TO_DEV_WINDOW_SIZE
Window size change message.
void(APIENTRY * FF_PFNGLACTIVETEXTUREPROC)(GLenum texture)
FF_PFNGLGETUNIFORMLOCATIONPROC glGetUniformLocation
@ AV_PIX_FMT_0BGR
packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
FF_PFNGLDELETEBUFFERSPROC glDeleteBuffers
static int opengl_resize(AVFormatContext *h, int width, int height)
FF_PFNGLACTIVETEXTUREPROC glActiveTexture
FF_PFNGLGETPROGRAMINFOLOGPROC glGetProgramInfoLog
FF_PFNGLBUFFERDATAPROC glBufferData
FF_PFNGLUSEPROGRAMPROC glUseProgram
@ AV_OPT_TYPE_INT
Underlying C type is int.
#define FF_GL_RED_COMPONENT
GLfloat model_view_matrix[16]
Model view matrix.
@ AV_WRITE_UNCODED_FRAME_QUERY
Query whether the feature is possible on this stream.
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
static int opengl_type_size(GLenum type)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
#define FF_GL_UNSIGNED_BYTE_3_3_2
#define FF_GL_INFO_LOG_LENGTH
GLint(APIENTRY * FF_PFNGLGETUNIFORMLOCATIONPROC)(GLuint program, const char *name)
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
char * av_strdup(const char *s)
Duplicate a string.
static av_cold int opengl_write_trailer(AVFormatContext *h)
void(APIENTRY * FF_PFNGLLINKPROGRAMPROC)(GLuint program)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
void(APIENTRY * FF_PFNGLUNIFORMMATRIX4FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value)
GLuint(APIENTRY * FF_PFNGLCREATEPROGRAMPROC)(void)
static av_cold int opengl_write_header(AVFormatContext *h)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
FF_PFNGLGETSHADERIVPROC glGetShaderiv
static const char *const FF_OPENGL_FRAGMENT_SHADER_YUVA_PLANAR
Fragment shader for planar YUVA formats.
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
This structure stores compressed data.
GLint chroma_div_w_location
void(APIENTRY * FF_PFNGLGENBUFFERSPROC)(GLsizei n, GLuint *buffers)
GLfloat projection_matrix[16]
Projection matrix.
#define FILL_COMPONENT(i)
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
FF_PFNGLVERTEXATTRIBPOINTERPROC glVertexAttribPointer
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
FF_PFNGLENABLEVERTEXATTRIBARRAYPROC glEnableVertexAttribArray
OpenGLVertexInfo vertex[4]
VBO.
GLint max_texture_size
Maximum texture size.
@ AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER
Destroy window buffer message.
FF_PFNGLSHADERSOURCEPROC glShaderSource
@ AV_OPT_TYPE_STRING
Underlying C type is a uint8_t* that is either NULL or points to a C string allocated with the av_mal...
GLint(APIENTRY * FF_PFNGLGETATTRIBLOCATIONPROC)(GLuint program, const char *name)
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
void(APIENTRY * FF_PFNGLDELETEPROGRAMPROC)(GLuint program)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
int non_pow_2_textures
1 when non power of 2 textures are supported