• QT + Linux + FFmpeg + C/C++: Saving an RTSP Stream to an MP4 Video File


    Reference: https://www.cnblogs.com/wenjingu/p/3990071.html

    The code in that reference was developed on Windows, however, and porting it to Linux as-is leads to compilation problems.

    Below is my port of that code to Linux; it compiles cleanly, records successfully, and the resulting file plays back correctly.

    (1) Build FFmpeg

    First download the source; I used ffmpeg-3.0.12.tar.xz.

    Run the usual configure / make / make install sequence:

      >./configure --enable-shared --prefix=/home/ffmpeg
      >make
      >make install

    Copy the resulting include and lib directories into an ffmpeg folder inside the QT project directory. (At runtime the dynamic loader must also be able to find the shared libraries, e.g. via LD_LIBRARY_PATH or an rpath.)

    (2) Use FFmpeg in the QT project

    Add the following to the .pro file (note the trailing backslashes, which qmake needs to continue the LIBS list across lines):

      INCLUDEPATH += ./ffmpeg/include
      LIBS += ./ffmpeg/lib/libavcodec.so \
          ./ffmpeg/lib/libavdevice.so \
          ./ffmpeg/lib/libavfilter.so \
          ./ffmpeg/lib/libavformat.so \
          ./ffmpeg/lib/libavutil.so \
          ./ffmpeg/lib/libswresample.so \
          ./ffmpeg/lib/libswscale.so
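
    Before writing the recording class, it is worth verifying that the headers and shared libraries are actually found. The snippet below is a minimal sketch, not part of the original article: build it as a temporary main.cpp in the project and run it; if it prints two version numbers, INCLUDEPATH and LIBS are set up correctly.

      extern "C"
      {
      #include "libavformat/avformat.h"
      #include "libavcodec/avcodec.h"
      }
      #include <cstdio>

      int main()
      {
          //Print the versions of the linked FFmpeg libraries; building and
          //running this successfully proves the include path and .so files resolve.
          printf("libavformat version: %u\n", avformat_version());
          printf("libavcodec  version: %u\n", avcodec_version());
          return 0;
      }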


    In the .h file, include the headers needed for development and declare the functions. FFmpeg is a C library, so its headers must be wrapped in extern "C" when included from C++:

         
      #include <QObject>
      #include <pthread.h>
      #include <unistd.h>
      extern "C"
      {
      #include "libavcodec/avcodec.h"
      #include "libavformat/avformat.h"
      #include "libswscale/swscale.h"
      #include "libavdevice/avdevice.h"
      }

    class recordVideo : public QObject
    {
    public:
        void recordVideoExec();

    private:
        static void* ReadingThrd(void *pParam);
    };

    Write the implementation in the .cpp file:

    void recordVideo::recordVideoExec()
    {
        pthread_t thread;
        if(0 == pthread_create(&thread, NULL,ReadingThrd, NULL))
        {
            printf("thread create succ, main thread id is %u
    ",(unsigned)pthread_self());
        }
    }
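
    /*
     * Note (not part of the original code): since this is a Qt/C++11 project,
     * the same thread could also be started with std::thread (add
     * #include <thread>) instead of pthreads, e.g.
     *
     *     std::thread(recordVideo::ReadingThrd, nullptr).detach();
     *
     * The pthread version above is kept as in the source article.
     */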
    
    static AVFormatContext *i_fmt_ctx;
    static AVStream *i_video_stream;
    
    static AVFormatContext *o_fmt_ctx;
    static AVStream *o_video_stream;
    
    static bool bStop = false;
    static int frame_nums = 0;
    
    void *recordVideo::ReadingThrd(void * pParam)
    {
        avcodec_register_all();
        av_register_all();
        avformat_network_init();
    
        /* should set to NULL so that avformat_open_input() allocate a new one */
        i_fmt_ctx = NULL;
        //RTSP URL of the camera, obtained via the ONVIF protocol
        char rtspUrl[] = "rtsp://10.19.17.23:554/Streaming/Channels/101?transportmode=unicast&profile=Profile_1";
        const char *filename = "2.mp4";
    
        if (avformat_open_input(&i_fmt_ctx, rtspUrl, NULL, NULL)!=0)
        {
            fprintf(stderr, " = could not open input file
    ");
            return nullptr;
        }
    
        if (avformat_find_stream_info(i_fmt_ctx, NULL)<0)
        {
            fprintf(stderr, " = could not find stream info
    ");
            return nullptr;
        }
    
        //av_dump_format(i_fmt_ctx, 0, rtspUrl, 0);
    
        /* find first video stream */
        for (unsigned i=0; i<i_fmt_ctx->nb_streams; i++)
        {
            if (i_fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            {
                i_video_stream = i_fmt_ctx->streams[i];
                break;
            }
        }
        if (i_video_stream == NULL)
        {
            fprintf(stderr, " = didn't find any video stream
    ");
            return nullptr;
        }
    
        avformat_alloc_output_context2(&o_fmt_ctx, NULL, NULL, filename);
    
        /*
         * since all input files are supposed to be identical (framerate, dimension, color format, ...)
         * we can safely set output codec values from first input file
         */
        o_video_stream = avformat_new_stream(o_fmt_ctx, NULL);
        {
            AVCodecContext *c;
            c = o_video_stream->codec;
            c->bit_rate = 400000;
            c->codec_id = i_video_stream->codec->codec_id;
            c->codec_type = i_video_stream->codec->codec_type;
            c->time_base.num = i_video_stream->time_base.num;
            c->time_base.den = i_video_stream->time_base.den;
            fprintf(stderr, " = time_base.num = %d time_base.den = %d
    ", c->time_base.num, c->time_base.den);
            c->width = i_video_stream->codec->width;
            c->height = i_video_stream->codec->height;
            c->pix_fmt = i_video_stream->codec->pix_fmt;
            printf(" =  %d height: %d pix_fmt: %d
    ", c->width, c->height, c->pix_fmt);
            c->flags = i_video_stream->codec->flags;
            c->flags |= CODEC_FLAG_GLOBAL_HEADER;
            c->me_range = i_video_stream->codec->me_range;
            c->max_qdiff = i_video_stream->codec->max_qdiff;
    
            c->qmin = i_video_stream->codec->qmin;
            c->qmax = i_video_stream->codec->qmax;
    
            c->qcompress = i_video_stream->codec->qcompress;
        }
    
        avio_open(&o_fmt_ctx->pb, filename, AVIO_FLAG_WRITE);
    
        avformat_write_header(o_fmt_ctx, NULL);
    
        int last_pts = 0;
        int last_dts = 0;
    
        int64_t pts = 0, dts = 0;
        while(!bStop)
        {
            //printf("------------------------------------------------------
    ");
            AVPacket i_pkt;
            av_init_packet(&i_pkt);
            i_pkt.size = 0;
            i_pkt.data = NULL;
            if (av_read_frame(i_fmt_ctx, &i_pkt) <0 )
                break;
            /*
             * pts and dts should increase monotonically
             * pts should be >= dts
             */
            i_pkt.flags |= AV_PKT_FLAG_KEY;
            pts = i_pkt.pts;
            i_pkt.pts += last_pts;
            dts = i_pkt.dts;
            i_pkt.dts += last_dts;
            i_pkt.stream_index = 0;
    
            //printf("%lld %lld
    ", i_pkt.pts, i_pkt.dts);
            static int num = 1;
            printf(" = frame %d
    ", num++);
            av_interleaved_write_frame(o_fmt_ctx, &i_pkt);
            //av_free_packet(&i_pkt);
            //av_init_packet(&i_pkt);
            //Sleep(10);
            //sleep(1);
            //On Linux, usleep() takes microseconds and requires #include <unistd.h>
            if(frame_nums > 2000)
            {
                bStop = true;
            }
            frame_nums++;
            usleep(10);
        }
    
        last_dts += dts;
        last_pts += pts;
    
        avformat_close_input(&i_fmt_ctx);
    
        av_write_trailer(o_fmt_ctx);
    
        avcodec_close(o_fmt_ctx->streams[0]->codec);
        av_freep(&o_fmt_ctx->streams[0]->codec);
        av_freep(&o_fmt_ctx->streams[0]);
    
        avio_close(o_fmt_ctx->pb);
        av_free(o_fmt_ctx);

        return nullptr;
    }
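
    A practical note on avformat_open_input(): it is called above with no options, so the RTSP demuxer uses its default UDP transport, which may drop packets on a lossy network. The sketch below is an addition, not part of the original article; it forces TCP interleaved transport via an AVDictionary (the option names "rtsp_transport" and "stimeout" are standard FFmpeg RTSP demuxer options, and the variables match the listing above):

      AVDictionary *opts = NULL;
      //ask the RTSP demuxer to use TCP instead of the default UDP transport
      av_dict_set(&opts, "rtsp_transport", "tcp", 0);
      //socket timeout in microseconds, so a dead camera does not block forever
      av_dict_set(&opts, "stimeout", "5000000", 0);
      if (avformat_open_input(&i_fmt_ctx, rtspUrl, NULL, &opts) != 0)
      {
          fprintf(stderr, " = could not open input file\n");
      }
      av_dict_free(&opts);

    Also note that the listing uses the deprecated AVStream::codec field, which is still available in FFmpeg 3.0.x; on FFmpeg 3.1 and later the stream parameters should instead be copied with avcodec_parameters_copy() via AVStream::codecpar.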

    (3) Finally, create an object of the class in main() and call its member function recordVideoExec(); a sketch of such a main() follows.
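
    A minimal sketch of that caller (the header file name and the Qt application setup are assumptions, not taken from the original article):

      #include <QCoreApplication>
      #include "recordvideo.h"   //header with the recordVideo class; file name is assumed

      int main(int argc, char *argv[])
      {
          QCoreApplication app(argc, argv);

          recordVideo recorder;        //class declared in the header above
          recorder.recordVideoExec();  //spawns the reading/recording thread

          //keep the main thread alive while the recording thread runs;
          //the thread stops itself after roughly 2000 frames (see bStop above)
          return app.exec();
      }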
