Go to the documentation of this file.
   37 #define RGBA(r,g,b,a) (((unsigned)(a) << 24) | ((r) << 16) | ((g) << 8) | (b)) 
   38 #define MAX_EPOCH_PALETTES 8   // Max 8 allowed per PGS epoch 
   39 #define MAX_EPOCH_OBJECTS  64  // Max 64 allowed per PGS epoch 
   40 #define MAX_OBJECT_REFS    2   // Max objects per display set 
  107     for (
i = 0; 
i < 
ctx->objects.count; 
i++) {
 
  109         ctx->objects.object[
i].rle_buffer_size  = 0;
 
  110         ctx->objects.object[
i].rle_remaining_len  = 0;
 
  112     ctx->objects.count = 0;
 
  113     ctx->palettes.count = 0;
 
  120     for (
i = 0; 
i < objects->
count; 
i++) {
 
  131     for (
i = 0; 
i < palettes->
count; 
i++) {
 
  163                       const uint8_t *buf, 
unsigned int buf_size)
 
  165     const uint8_t *rle_bitmap_end;
 
  166     int pixel_count, line_count;
 
  168     rle_bitmap_end = buf + buf_size;
 
  178     while (buf < rle_bitmap_end && line_count < rect->
h) {
 
  182         color = bytestream_get_byte(&buf);
 
  186             flags = bytestream_get_byte(&buf);
 
  189                 run = (
run << 8) + bytestream_get_byte(&buf);
 
  190             color = 
flags & 0x80 ? bytestream_get_byte(&buf) : 0;
 
  193         if (
run > 0 && pixel_count + run <= rect->
w * 
rect->
h) {
 
  201             if (pixel_count % 
rect->
w > 0) {
 
  212     if (pixel_count < rect->
w * 
rect->
h) {
 
  233                                   const uint8_t *buf, 
int buf_size)
 
  238     uint8_t sequence_desc;
 
  246     id = bytestream_get_be16(&buf);
 
  253         object = &
ctx->objects.object[
ctx->objects.count++];
 
  261     sequence_desc = bytestream_get_byte(&buf);
 
  263     if (!(sequence_desc & 0x80)) {
 
  269         object->rle_data_len += buf_size;
 
  270         object->rle_remaining_len -= buf_size;
 
  280     rle_bitmap_len = bytestream_get_be24(&buf) - 2*2;
 
  282     if (buf_size > rle_bitmap_len) {
 
  284                "Buffer dimension %d larger than the expected RLE data %d\n",
 
  285                buf_size, rle_bitmap_len);
 
  290     width  = bytestream_get_be16(&buf);
 
  291     height = bytestream_get_be16(&buf);
 
  305         object->rle_data_len = 0;
 
  306         object->rle_remaining_len = 0;
 
  310     memcpy(object->
rle, buf, buf_size);
 
  311     object->rle_data_len = buf_size;
 
  312     object->rle_remaining_len = rle_bitmap_len - buf_size;
 
  328                                   const uint8_t *buf, 
int buf_size)
 
  333     const uint8_t *buf_end = buf + buf_size;
 
  337     int r, 
g, 
b, r_add, g_add, b_add;
 
  340     id  = bytestream_get_byte(&buf);
 
  347         palette = &
ctx->palettes.palette[
ctx->palettes.count++];
 
  354     while (buf < buf_end) {
 
  355         color_id  = bytestream_get_byte(&buf);
 
  356         y         = bytestream_get_byte(&buf);
 
  357         cr        = bytestream_get_byte(&buf);
 
  358         cb        = bytestream_get_byte(&buf);
 
  359         alpha     = bytestream_get_byte(&buf);
 
  370         ff_dlog(avctx, 
"Color %d := (%d,%d,%d,%d)\n", color_id, 
r, 
g, 
b, 
alpha);
 
  390                                       const uint8_t *buf, 
int buf_size,
 
  395     const uint8_t *buf_end = buf + buf_size;
 
  398     int w = bytestream_get_be16(&buf);
 
  399     int h = bytestream_get_be16(&buf);
 
  401     ctx->presentation.pts = 
pts;
 
  403     ff_dlog(avctx, 
"Video Dimensions %dx%d\n",
 
  413     ctx->presentation.id_number = bytestream_get_be16(&buf);
 
  423     state = bytestream_get_byte(&buf) >> 6;
 
  432     ctx->presentation.palette_id = bytestream_get_byte(&buf);
 
  433     ctx->presentation.object_count = bytestream_get_byte(&buf);
 
  436                "Invalid number of presentation objects %d\n",
 
  437                ctx->presentation.object_count);
 
  438         ctx->presentation.object_count = 2;
 
  445     for (
i = 0; 
i < 
ctx->presentation.object_count; 
i++)
 
  449         if (buf_end - buf < 8) {
 
  451             ctx->presentation.object_count = 
i;
 
  455         object->id               = bytestream_get_be16(&buf);
 
  456         object->window_id        = bytestream_get_byte(&buf);
 
  457         object->composition_flag = bytestream_get_byte(&buf);
 
  459         object->x = bytestream_get_be16(&buf);
 
  460         object->y = bytestream_get_be16(&buf);
 
  464             object->crop_x = bytestream_get_be16(&buf);
 
  465             object->crop_y = bytestream_get_be16(&buf);
 
  466             object->crop_w = bytestream_get_be16(&buf);
 
  467             object->crop_h = bytestream_get_be16(&buf);
 
  470         ff_dlog(avctx, 
"Subtitle Placement x=%d, y=%d\n",
 
  471                 object->
x, object->
y);
 
  474             av_log(avctx, 
AV_LOG_ERROR, 
"Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
 
  475                    object->
x, object->
y,
 
  477             object->y = 
object->x = 0;
 
  498                                const uint8_t *buf, 
int buf_size)
 
  506     memset(sub, 0, 
sizeof(*sub));
 
  517     if (!
ctx->presentation.object_count)
 
  527                ctx->presentation.palette_id);
 
  531     for (
i = 0; 
i < 
ctx->presentation.object_count; 
i++) {
 
  545                    ctx->presentation.objects[
i].id);
 
  551         if (
ctx->presentation.objects[
i].composition_flag & 0x40)
 
  554         rect->
x    = 
ctx->presentation.objects[
i].x;
 
  555         rect->
y    = 
ctx->presentation.objects[
i].y;
 
  561             rect->linesize[0] = 
object->
w;
 
  581         rect->nb_colors = 256;
 
  586         if (!
ctx->forced_subs_only || 
ctx->presentation.objects[
i].composition_flag & 0x40)
 
  587             memcpy(
rect->data[1], palette->
clut, 
rect->nb_colors * 
sizeof(uint32_t));
 
  593                   int *got_sub_ptr, 
const AVPacket *avpkt)
 
  595     const uint8_t *buf = avpkt->
data;
 
  596     int buf_size       = avpkt->
size;
 
  598     const uint8_t *buf_end;
 
  599     uint8_t       segment_type;
 
  603     ff_dlog(avctx, 
"PGS sub packet:\n");
 
  605     for (
i = 0; 
i < buf_size; 
i++) {
 
  620     buf_end = buf + buf_size;
 
  623     while (buf < buf_end) {
 
  624         segment_type   = bytestream_get_byte(&buf);
 
  625         segment_length = bytestream_get_be16(&buf);
 
  627         ff_dlog(avctx, 
"Segment Length %d, Segment Type %x\n", segment_length, segment_type);
 
  633         switch (segment_type) {
 
  665                    segment_type, segment_length);
 
  673         buf += segment_length;
 
  679 #define OFFSET(x) offsetof(PGSSubContext, x) 
  680 #define SD AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_DECODING_PARAM 
  682     {
"forced_subs_only", 
"Only show forced subtitles", 
OFFSET(forced_subs_only), 
AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, 
SD},
 
  
#define AV_EF_EXPLODE
abort decoding on minor error detection
The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. Formats are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static double cb(void *priv, double x, double y)
static av_cold int init_decoder(AVCodecContext *avctx)
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
static const AVOption options[]
#define YUV_TO_RGB1_CCIR(cb1, cr1)
@ AV_CODEC_ID_HDMV_PGS_SUBTITLE
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
static int parse_presentation_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int64_t pts)
Parse the presentation segment packet.
static av_cold void close(AVCodecParserContext *s)
static PGSSubObject * find_object(int id, PGSSubObjects *objects)
AVCodec p
The public AVCodec.
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int parse_object_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Parse the picture segment packet.
PGSSubPalette palette[MAX_EPOCH_PALETTES]
#define YUV_TO_RGB1_CCIR_BT709(cb1, cr1)
int64_t pts
Same as packet pts, in AV_TIME_BASE.
PGSSubObject object[MAX_EPOCH_OBJECTS]
#define CODEC_LONG_NAME(str)
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
static int display_end_segment(AVCodecContext *avctx, AVSubtitle *sub, const uint8_t *buf, int buf_size)
Parse the display segment packet.
const char * av_default_item_name(void *ptr)
Return the context name.
#define YUV_TO_RGB2_CCIR(r, g, b, y1)
static int decode(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, const AVPacket *avpkt)
static PGSSubPalette * find_palette(int id, PGSSubPalettes *palettes)
PGSSubPresentation presentation
int(* init)(AVBSFContext *ctx)
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
#define AV_NOPTS_VALUE
Undefined timestamp value.
uint32_t end_display_time
#define AV_SUBTITLE_FLAG_FORCED
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
#define MAX_EPOCH_PALETTES
unsigned int rle_remaining_len
#define i(width, name, range_min, range_max)
unsigned int rle_data_len
PGSSubObjectRef objects[MAX_OBJECT_REFS]
const FFCodec ff_pgssub_decoder
#define av_malloc_array(a, b)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
static int decode_rle(AVCodecContext *avctx, AVSubtitleRect *rect, const uint8_t *buf, unsigned int buf_size)
Decode the RLE data.
#define MAX_EPOCH_OBJECTS
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU).
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
void * av_calloc(size_t nmemb, size_t size)
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
main external API structure.
static av_cold int close_decoder(AVCodecContext *avctx)
static int parse_palette_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Parse the palette segment packet.
#define FF_CODEC_DECODE_SUB_CB(func)
static const int16_t alpha[]
This structure stores compressed data.
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
static double cr(void *priv, double x, double y)
int width
picture width / height.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static void flush_cache(AVCodecContext *avctx)
uint32_t start_display_time
static const AVClass pgsdec_class
unsigned int rle_buffer_size