• ffplay源码分析06 ---- 视频输出


     =====================================================

    ffplay源码分析01 ---- 框架

    ffplay源码分析02 ---- 数据读取线程

    ffplay源码分析03 ---- 视频解码线程

    ffplay源码分析03 ---- 音频解码线程

    ffplay源码分析04 ---- 音频输出

    ffplay源码分析05 ---- 音频重采样

    ffplay源码分析06 ---- 视频输出

    ffplay源码分析07 ---- 音视频同步

    =====================================================

    初始化

    /* Called from the main */
    int main(int argc, char **argv)
    {
        int flags;
        VideoState *is;
        //    av_log_set_level(AV_LOG_TRACE);
        init_dynload();
        // 1. 对FFmpeg的初始化
        av_log_set_flags(AV_LOG_SKIP_REPEATED);
        parse_loglevel(argc, argv, options);
        /// av_log_set_level(AV_LOG_DEBUG);
        /* register all codecs, demux and protocols */
    #if CONFIG_AVDEVICE
        avdevice_register_all();
    #endif
        avformat_network_init();
    
        init_opts();
    
        signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
        signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
    
        show_banner(argc, argv, options);
        // 2. 对传递的参数进行初始化
        parse_options(NULL, argc, argv, options, opt_input_file);
    
        if (!input_filename) {
            show_usage();
            av_log(NULL, AV_LOG_FATAL, "An input file must be specified
    ");
            av_log(NULL, AV_LOG_FATAL,
                   "Use -h to get full help or, even better, run 'man %s'
    ", program_name);
            exit(1);
        }
    
        /* 是否显示视频 */
        if (display_disable) {
            video_disable = 1;
        }
        // 3. SDL的初始化
        flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
        /* 是否运行音频 */
        if (audio_disable)
            flags &= ~SDL_INIT_AUDIO;
        else {
            /* Try to work around an occasional ALSA buffer underflow issue when the
             * period size is NPOT due to ALSA resampling by forcing the buffer size. */
            if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
                SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
        }
        if (display_disable)
            flags &= ~SDL_INIT_VIDEO;
        if (SDL_Init (flags)) {
            av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s
    ", SDL_GetError());
            av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)
    ");
            exit(1);
        }
    
        SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
        SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
    
        av_init_packet(&flush_pkt);                // 初始化flush_packet
        flush_pkt.data = (uint8_t *)&flush_pkt; // 初始化为数据指向自己本身
    
        // 4. 创建窗口
        if (!display_disable) {
            int flags = SDL_WINDOW_HIDDEN;
            if (alwaysontop)
    #if SDL_VERSION_ATLEAST(2,0,5)
                flags |= SDL_WINDOW_ALWAYS_ON_TOP;
    #else
                av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.
    ");
    #endif
            if (borderless)
                flags |= SDL_WINDOW_BORDERLESS;
            else
                flags |= SDL_WINDOW_RESIZABLE;
            window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
            SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
            if (window) {
                // 创建renderer
                renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
                if (!renderer) {
                    av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s
    ", SDL_GetError());
                    renderer = SDL_CreateRenderer(window, -1, 0);
                }
                if (renderer) {
                    if (!SDL_GetRendererInfo(renderer, &renderer_info))
                        av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.
    ", renderer_info.name);
                }
            }
            if (!window || !renderer || !renderer_info.num_texture_formats) {
                av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
                do_exit(NULL);
            }
        }
        // 5. 通过stream_open函数,开启read_thread读取线程
        is = stream_open(input_filename, file_iformat);
        if (!is) {
            av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!
    ");
            do_exit(NULL);
        }
    
        // 6. 事件响应
        event_loop(is);
    
        /* never returns */
    
        return 0;
    }
    View Code

    初始化主要在main里面,主要做了如下几件事:

    1. SDL_Init,主要是SDL_INIT_VIDEO的支持
    2. SDL_CreateWindow,创建主窗口
    3. SDL_CreateRender,基于主窗口创建renderer,用于渲染输出。
    4. stream_open
    5. event_loop,播放控制事件响应循环,但也负责了video显示输出。

    video_refresh()

    /* called to display each frame */
    /* Invoked in a loop (whenever not paused, or on forced refresh) from the
     * event loop. Decides, based on the master clock, whether to keep showing
     * the current frame, promote the next frame, or drop late frames, then
     * displays the picture and (every ~30 ms) prints the status line.
     * remaining_time (in/out): upper bound on how long the caller may sleep. */
    static void video_refresh(void *opaque, double *remaining_time)
    {
        VideoState *is = opaque;
        double time;
    
        Frame *sp, *sp2;
    
        if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
            check_external_clock_speed(is);
    
        if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
            time = av_gettime_relative() / 1000000.0;
            if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
                video_display(is);
                is->last_vis_time = time;
            }
            *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
        }
    
        if (is->video_st) {
    retry:
            if (frame_queue_nb_remaining(&is->pictq) == 0) { // is the frame queue empty?
                // nothing to do, no picture to display in the queue
            } else { // this is the core of audio/video synchronization
                double last_duration, duration, delay;
                Frame *vp, *lastvp;
    
                /* dequeue the picture */
                lastvp = frame_queue_peek_last(&is->pictq); // previous frame (currently on screen)
                vp = frame_queue_peek(&is->pictq);          // next frame, waiting to be shown
    
                if (vp->serial != is->videoq.serial) {
                    // stale playback serial (e.g. after a seek): discard it so we
                    // reach frames of the latest serial as fast as possible
                    frame_queue_next(&is->pictq);
                    goto retry;
                }
    
                if (lastvp->serial != vp->serial) {
                    // a new playback serial starts: reset the frame timer to "now"
                    is->frame_timer = av_gettime_relative() / 1000000.0;
                }
    
                if (is->paused)
                {
                    // paused: keep displaying the current picture.
                    // (BUGFIX: a debug printf used to sit AFTER this goto and was
                    // unreachable dead code; it has been removed.)
                    goto display;
                }
                /* compute nominal last_duration */
                // lastvp = previous frame, vp = current frame, nextvp = next frame
                // last_duration: how long the previous frame should stay on screen
                last_duration = vp_duration(is, lastvp, vp);
    
                // compute_target_delay() turns that nominal duration into the actual
                // time vp still has to wait:
                //  - video-master sync: delay == last_duration
                //  - audio/external-master sync: delay is adjusted against the master clock
                delay = compute_target_delay(last_duration, is);
    
                time= av_gettime_relative()/1000000.0;
                // is->frame_timer is when lastvp was presented;
                // is->frame_timer + delay is when vp is due
                if (time < is->frame_timer + delay) { // keep showing the previous frame?
                    // vp is not due yet: keep lastvp on screen and
                    // report the minimum time the caller may sleep
                    *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
                    goto display;
                }
    
                // vp is due (or overdue): promote it to the frame being displayed
    
                is->frame_timer += delay;   // advance the presentation time
                if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX) {
                    is->frame_timer = time; // too far behind the wall clock: resync to it
                }
                SDL_LockMutex(is->pictq.mutex);
                if (!isnan(vp->pts))
                    update_video_pts(is, vp->pts, vp->pos, vp->serial); // update the video clock
                SDL_UnlockMutex(is->pictq.mutex);
                // frame-dropping logic
                if (frame_queue_nb_remaining(&is->pictq) > 1) { // only possible when a nextvp exists
                    Frame *nextvp = frame_queue_peek_next(&is->pictq);
                    duration = vp_duration(is, vp, nextvp);
                    if(!is->step        // never drop in frame-step mode (is->step == 1)
                            && (framedrop>0 ||      // CPU decodes too slowly
                                (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) // not video-master sync
                            && time > is->frame_timer + duration // really at least one frame late
                            ) {
                        printf("%s(%d) dif:%lfs, drop frame\n", __FUNCTION__, __LINE__,
                               (is->frame_timer + duration) - time);
                        is->frame_drops_late++;             // account for the late drop
                        frame_queue_next(&is->pictq);       // the actual drop happens here
                        // (do not loop dropping frames here: the audio clock may have
                        //  just re-synced, so delay must be recomputed from the top)
                        goto retry;
                    }
                }
    
                if (is->subtitle_st) {
                    // retire subtitles that are stale or already past their display window
                    while (frame_queue_nb_remaining(&is->subpq) > 0) {
                        sp = frame_queue_peek(&is->subpq);
    
                        if (frame_queue_nb_remaining(&is->subpq) > 1)
                            sp2 = frame_queue_peek_next(&is->subpq);
                        else
                            sp2 = NULL;
    
                        if (sp->serial != is->subtitleq.serial
                                || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            if (sp->uploaded) {
                                int i;
                                // clear the rectangles this subtitle occupied in the texture
                                for (i = 0; i < sp->sub.num_rects; i++) {
                                    AVSubtitleRect *sub_rect = sp->sub.rects[i];
                                    uint8_t *pixels;
                                    int pitch, j;
    
                                    if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
                                        for (j = 0; j < sub_rect->h; j++, pixels += pitch)
                                            memset(pixels, 0, sub_rect->w << 2); // 4 bytes/pixel (ARGB)
                                        SDL_UnlockTexture(is->sub_texture);
                                    }
                                }
                            }
                            frame_queue_next(&is->subpq);
                        } else {
                            break;
                        }
                    }
                }
    
                frame_queue_next(&is->pictq);   // pop vp; it becomes the "last shown" frame
                is->force_refresh = 1;          /* the picture must be (re)drawn */
    
                if (is->step && !is->paused)
                    stream_toggle_pause(is);    // frame-step mode: pause again after one frame
            }
    display:
            /* display picture */
            if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
                video_display(is); // this is where the frame actually hits the screen
        }
        is->force_refresh = 0;
        if (show_status) {
            // print the status line at most every 30 ms
            static int64_t last_time;
            int64_t cur_time;
            int aqsize, vqsize, sqsize;
            double av_diff;
    
            cur_time = av_gettime_relative();
            if (!last_time || (cur_time - last_time) >= 30000) {
                aqsize = 0;
                vqsize = 0;
                sqsize = 0;
                if (is->audio_st)
                    aqsize = is->audioq.size;
                if (is->video_st)
                    vqsize = is->videoq.size;
                if (is->subtitle_st)
                    sqsize = is->subtitleq.size;
                av_diff = 0;
                if (is->audio_st && is->video_st)
                    av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
                else if (is->video_st)
                    av_diff = get_master_clock(is) - get_clock(&is->vidclk);
                else if (is->audio_st)
                    av_diff = get_master_clock(is) - get_clock(&is->audclk);
                // trailing '\r' rewrites the same terminal line; hence the fflush below
                av_log(NULL, AV_LOG_INFO,
                       "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                       get_master_clock(is),
                       (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : "   ")),
                       av_diff,
                       is->frame_drops_early + is->frame_drops_late,
                       aqsize / 1024,
                       vqsize / 1024,
                       sqsize,
                       is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
                       is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
                fflush(stdout);
                last_time = cur_time;
            }
        }
    }

     video_image_display()

    /*
     * Renders the most recently shown video frame (and any active subtitle)
     * into the SDL renderer. Uploads YUV data to the video texture on first
     * display, converts PAL8 subtitle rects to BGRA into the subtitle texture,
     * and letterboxes the picture into the window via calculate_display_rect().
     */
    static void video_image_display(VideoState *is)
    {
        Frame *vp;
        Frame *sp = NULL;
        SDL_Rect rect;
    
        // This is what the queue's keep_last feature is for: frame_queue_next()
        // was called, but the most recently dequeued frame is not destroyed yet,
        // so it can still be read here for display.
        vp = frame_queue_peek_last(&is->pictq);
        if (is->subtitle_st) {
            if (frame_queue_nb_remaining(&is->subpq) > 0) {
                sp = frame_queue_peek(&is->subpq);
    
                // only show the subtitle once its start_display_time has been reached
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
                    if (!sp->uploaded) {
                        uint8_t* pixels[4];
                        int pitch[4];
                        int i;
                        if (!sp->width || !sp->height) {
                            // subtitle carries no size of its own: use the video frame's
                            sp->width = vp->width;
                            sp->height = vp->height;
                        }
                        if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
                            return;
    
                        for (i = 0; i < sp->sub.num_rects; i++) {
                            AVSubtitleRect *sub_rect = sp->sub.rects[i];
    
                            // clamp the rect into the subtitle plane
                            sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
                            sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
                            sub_rect->w = av_clip(sub_rect->w, 0, sp->width  - sub_rect->x);
                            sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
    
                            // PAL8 -> BGRA conversion context (cached across calls)
                            is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
                                                                       sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
                                                                       sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
                                                                       0, NULL, NULL, NULL);
                            if (!is->sub_convert_ctx) {
                                av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
                                return;
                            }
                            if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
                                sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
                                          0, sub_rect->h, pixels, pitch);
                                SDL_UnlockTexture(is->sub_texture);
                            }
                        }
                        sp->uploaded = 1;
                    }
                } else
                    sp = NULL; // not yet due: do not render it this time
            }
        }
        // Fit the frame into the window honoring the sample aspect ratio; rect
        // returns the display position and size of the picture in the window.
        calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height,
                               vp->width, vp->height, vp->sar);
    //    rect.x = rect.w /2;   // test code
    //    rect.w = rect.w /2;   // scaling is done by SDL here, not by sws
        if (!vp->uploaded) {
            // upload the YUV data into vid_texture (first display of this frame only)
            if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
                return;
            vp->uploaded = 1;
            vp->flip_v = vp->frame->linesize[0] < 0; // negative stride => vertically flipped
        }
    
        set_sdl_yuv_conversion_mode(vp->frame);
        SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
        set_sdl_yuv_conversion_mode(NULL);
        if (sp) {
    #if USE_ONEPASS_SUBTITLE_RENDER
            SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
    #else
            // blit each subtitle rect separately, scaled from subtitle plane
            // coordinates into the on-screen rect
            int i;
            double xratio = (double)rect.w / (double)sp->width;
            double yratio = (double)rect.h / (double)sp->height;
            for (i = 0; i < sp->sub.num_rects; i++) {
                SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
                SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
                                   .y = rect.y + sub_rect->y * yratio,
                                   .w = sub_rect->w * xratio,
                                   .h = sub_rect->h * yratio};
                SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
            }
    #endif
        }
    }
  • 相关阅读:
    Flask 的 请求扩展 与 中间件
    Flask的配置文件 与 session
    django中的FBV和CBV
    Flask开启多线程、多进程
    WPF获取原始控件样式
    一个40岁老码农的总结,奋斗没有意义,选择大于努力
    FastText 分析与实践
    Flask的多app应用,多线程如何体现
    Python 远程调用MetaSploit
    GitLab 7.5.3 CentOS7安装和SMTP配置
  • 原文地址:https://www.cnblogs.com/vczf/p/14171184.html
Copyright © 2020-2023  润新知