• After a week of grinding, video streaming over the LAN finally works


    Topic: Real-time video capture and streaming on Windows

    References: the materials below are ones I found online and collected along the way; they are all decent and worth a read.

    A bit excited: this is a week's worth of work, and honestly, it was not easy.

    I went from a coder who knew nothing about video to one who has just barely gotten started. Not a big change, but enough to learn that every trade is harder than it looks.

    Back to business. Let me walk through my overall approach.

     

    I actually considered several designs before settling on this one.

    Plan 1:

    The more reliable option was on Linux: use a FIFO to move the data, with one side capturing and converting to H.264. Linux can do that conversion natively, which is much simpler than on Windows.

    The live555 side then reads from the FIFO (which is really a named pipe). The benefit is clean communication between the two processes, with fairly low latency.
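
    To make Plan 1 concrete, here is a minimal sketch of the producer side of that FIFO hand-off (Linux only; the path, buffer size, and variable names are mine for illustration, not from any actual implementation):

    // Plan 1 sketch: the capture/encode process writes H.264 data into a
    // named pipe; the live555 process opens the same path and read()s it.
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main()
    {
       const char* path = "/tmp/h264.fifo";   // hypothetical pipe location
       mkfifo(path, 0666);                    // create the FIFO (ignore EEXIST)
       int fd = open(path, O_WRONLY);         // blocks until a reader attaches
       unsigned char nal[4096];
       int len = 0;                           // would be filled by the encoder
       write(fd, nal, len);                   // one encoded chunk per write
       close(fd);
       return 0;
    }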

    Of course, on Windows this can also be done with inter-process communication; there are plenty of mechanisms, which I won't go into here.

    But there is an even simpler way: use multiple threads within a single process, and hand frames to the streaming thread through a circular array, which keeps things real-time (the shared structures are sketched in the key-points section below).

    Plan 2:

    Use OpenCV to capture frames (and optionally run image processing on them), encode them to H.264 with libx264, then push the result to an RTSP server.

    The sticking point was that I wasn't familiar enough with libx264, so I eventually abandoned this plan.
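
    For reference, the capture half of Plan 2 is the easy part; here is a minimal OpenCV sketch (camera index 0 assumed), with the libx264 encode left out since that was the piece I never cracked:

    // Plan 2 sketch: grab frames with OpenCV. Feeding them to libx264 would
    // require converting BGR to I420 and calling x264_encoder_encode(),
    // which is the step that made me drop this plan.
    #include <opencv2/opencv.hpp>

    int main()
    {
       cv::VideoCapture cap(0);            // default camera
       if (!cap.isOpened()) return -1;
       cv::Mat frame;
       while (cap.read(frame))
       {
          // any image processing would go here, before encoding
          cv::imshow("preview", frame);
          if (cv::waitKey(1) == 27) break; // ESC quits
       }
       return 0;
    }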

    Later I came across an article by Lei Xiaohua (雷神), which finally convinced me to go with FFmpeg + live555.

     

    How it works:

    Key points:

    The program consists of three threads: one that captures and encodes; one that pushes the encoded data out as an RTSP H.264 stream; and the main thread. Data is passed between threads through a circular array; a reconstruction of the shared definitions follows below.
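
    The shared pieces (szbuffer, recvcount, StreamData, ARRAYLENGTH) live in global.h, which this post never shows, so the following reconstruction is a guess based on how the code below uses them:

    // Hypothetical reconstruction of global.h (sizes are placeholders).
    #include <cstdio>

    #define ARRAYLENGTH 20                // number of slots in the ring
    #define MAXFRAMESIZE 100000           // max bytes of one encoded frame

    struct StreamData
    {
       unsigned char str[MAXFRAMESIZE];   // one encoded H.264 frame
       int len;                           // valid bytes in str
    };

    StreamData szbuffer[ARRAYLENGTH];     // ring written by the encoder thread
    int recvcount = 0;                    // index of the next slot to write
    FILE *fp_write;                       // debug dump of the raw H.264 stream

    // The encoder thread fills slot recvcount, then advances it; the live555
    // thread always reads the newest completed slot, which sits at
    // (recvcount + ARRAYLENGTH - 1) % ARRAYLENGTH.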

    That gives the overall structure.

    Now let me go through each piece in turn:

    • Capture-and-encode thread:

    Based on two of Lei Xiaohua's posts:

    http://blog.csdn.net/leixiaohua1020/article/details/39759623 Simplest FFmpeg memory I/O example: a memory transcoder

    http://blog.csdn.net/leixiaohua1020/article/details/39759163 Simplest FFmpeg memory I/O example: a memory player

     

    • Thread that pushes the encoded data out over RTSP (H.264):

    Based on two posts by other folks online:

    http://bubuko.com/infodetail-272265.html

    Sending a live H.264 stream over RTSP with live555 on Windows

    http://blog.csdn.net/xiejiashu/article/details/8269873 Using live555 to capture and forward local video, source code included

     

    The main work is rewriting live555's doGetNextFrame() (the Source code at the end of this post).

    The complete code:

    #include "global.h"
    #include "FFmpegReadCamera.h"
    #include "H264LiveVideoServerMediaSubssion.hh"
    #include "H264FramedLiveSource.hh"
    #include "liveMedia.hh"
    #include "BasicUsageEnvironment.hh"

    #define BUFSIZE 10000

    DWORD WINAPI ChildFunc1(LPVOID);
    DWORD WINAPI ChildFunc2(LPVOID);


    // Print the RTSP URL that clients should use to connect
    static void announceStream(RTSPServer* rtspServer, ServerMediaSession* sms, char const* streamName)
    {
       char* url = rtspServer->rtspURL(sms);
       UsageEnvironment& env = rtspServer->envir();
       env << streamName << "\n";
       env << "Play this stream using the URL \"" << url << "\"\n";
       delete[] url;
    }

    int main(int argc, char** argv)
    {
       // zero out the shared circular buffer before the threads start
       for (int i = 0; i < ARRAYLENGTH; ++i)
       {
          memset(&szbuffer[i], 0, sizeof(StreamData));
       }

       // start the capture/encode thread, give it a head start to fill
       // the buffer, then start the RTSP server thread
       CreateThread(NULL, 0, ChildFunc1, 0, 0, NULL);
       Sleep(3000);
       CreateThread(NULL, 0, ChildFunc2, 0, 0, NULL);
       while (1)
       {
          Sleep(1);
       }
       return 0;
    }

    DWORD WINAPI ChildFunc1(LPVOID p)
    {
       int ret;
       AVFormatContext *ofmt_ctx = NULL;
       AVStream *out_stream;
       AVStream *in_stream;
       AVCodecContext *enc_ctx;
       AVCodecContext *dec_ctx;
       AVCodec* encoder;
       enum AVMediaType type;
       fp_write = fopen("test.h264", "wb+");
       unsigned int stream_index;
       AVPacket enc_pkt;
       int enc_got_frame;

       AVFormatContext *pFormatCtx;
       int i, videoindex;
       AVCodecContext *pCodecCtx;
       AVCodec *pCodec;

       av_register_all();

       avformat_network_init();
       pFormatCtx = avformat_alloc_context();

       avformat_alloc_output_context2(&ofmt_ctx, NULL, "h264", NULL);

       // Register all capture devices
       avdevice_register_all();
       // List the available DirectShow devices
       show_dshow_device();
       // List the options of one device (camera parameters)
       show_dshow_device_option();
       // Show VFW options
       show_vfw_device();
       // Windows
    #ifdef _WIN32
    #if USE_DSHOW
       AVInputFormat *ifmt = av_find_input_format("dshow");
       // Set your own video device's name here
       if (avformat_open_input(&pFormatCtx, "video=Integrated Webcam", ifmt, NULL) != 0) {
          printf("Couldn't open input stream.\n");
          return -1;
       }
    #else
       AVInputFormat *ifmt = av_find_input_format("vfwcap");
       if (avformat_open_input(&pFormatCtx, "0", ifmt, NULL) != 0) {
          printf("Couldn't open input stream.\n");
          return -1;
       }
    #endif
    #endif
       // Linux
    #ifdef linux
       AVInputFormat *ifmt = av_find_input_format("video4linux2");
       if (avformat_open_input(&pFormatCtx, "/dev/video0", ifmt, NULL) != 0) {
          printf("Couldn't open input stream.\n");
          return -1;
       }
    #endif

       if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
       {
          printf("Couldn't find stream information.\n");
          return -1;
       }
       videoindex = -1;
       for (i = 0; i < pFormatCtx->nb_streams; i++)
          if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
          {
             videoindex = i;
             break;
          }
       if (videoindex == -1)
       {
          printf("Couldn't find a video stream.\n");
          return -1;
       }

       pCodecCtx = pFormatCtx->streams[videoindex]->codec;
       pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
       if (pCodec == NULL)
       {
          printf("Codec not found.\n");
          return -1;
       }
       if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
       {
          printf("Could not open codec.\n");
          return -1;
       }

       AVFrame *pFrame, *pFrameYUV;
       pFrame = avcodec_alloc_frame();
       pFrameYUV = avcodec_alloc_frame();
       int length = avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
       uint8_t *out_buffer = (uint8_t *)av_malloc(length);

       /* open the custom in-memory output; write_buffer is the write callback */
       AVIOContext *avio_out = avio_alloc_context(out_buffer, length, 0, NULL, NULL, write_buffer, NULL);
       if (avio_out == NULL)
       {
          printf("Failed to allocate the AVIO context.\n");
          return -1;
       }

       ofmt_ctx->pb = avio_out;
       ofmt_ctx->flags = AVFMT_FLAG_CUSTOM_IO;
       for (int i = 0; i < 1; i++) // only one (video) output stream
       {
          out_stream = avformat_new_stream(ofmt_ctx, NULL);
          if (!out_stream)
          {
             av_log(NULL, AV_LOG_ERROR, "failed allocating output stream\n");
             return AVERROR_UNKNOWN;
          }
          in_stream = pFormatCtx->streams[i];
          dec_ctx = in_stream->codec;
          enc_ctx = out_stream->codec;
          // configure the H.264 encoder
          if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
          {
             encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
             enc_ctx->height = dec_ctx->height;
             enc_ctx->width = dec_ctx->width;
             enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
             enc_ctx->pix_fmt = encoder->pix_fmts[0];
             enc_ctx->time_base = dec_ctx->time_base;
             enc_ctx->me_range = 16;
             enc_ctx->max_qdiff = 4;
             enc_ctx->qmin = 10; // qmin/qmax together control the picture quality
             enc_ctx->qmax = 51;
             enc_ctx->qcompress = 0.6;
             enc_ctx->refs = 3;
             enc_ctx->bit_rate = 500000;

             //enc_ctx->time_base.num = 1;
             //enc_ctx->time_base.den = 25;
             //enc_ctx->gop_size = 10;
             //enc_ctx->bit_rate = 3000000;

             ret = avcodec_open2(enc_ctx, encoder, NULL);
             if (ret < 0)
             {
                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                return ret;
             }
             //av_opt_set(enc_ctx->priv_data, "tune", "zerolatency", 0);
          }
          else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN)
          {
             av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
             return AVERROR_INVALIDDATA;
          }
          else
          {
             // this stream must be remuxed
             ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec, pFormatCtx->streams[i]->codec);
             if (ret < 0)
             {
                av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
                return ret;
             }
          }
          if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
          {
             enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
          }
       }
       // init muxer, write the output header
       ret = avformat_write_header(ofmt_ctx, NULL);
       if (ret < 0)
       {
          av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
          return ret;
       }

       i = 0;
       // pCodecCtx is the decoder's codec context
       avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
       //SDL----------------------------
       if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
          printf("Could not initialize SDL - %s\n", SDL_GetError());
          return -1;
       }
       int screen_w = 0, screen_h = 0;
       SDL_Surface *screen;
       screen_w = pCodecCtx->width;
       screen_h = pCodecCtx->height;
       screen = SDL_SetVideoMode(screen_w, screen_h, 0, 0);

       if (!screen) {
          printf("SDL: could not set video mode - exiting:%s\n", SDL_GetError());
          return -1;
       }
       SDL_Overlay *bmp;
       bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen);
       SDL_Rect rect;
       //SDL End------------------------
       int got_picture;

       AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));
       //Output Information-----------------------------
       printf("File Information---------------------\n");
       av_dump_format(pFormatCtx, 0, NULL, 0);
       printf("-------------------------------------------------\n");

    #if OUTPUT_YUV420P
       FILE *fp_yuv = fopen("output.yuv", "wb+");
    #endif

       struct SwsContext *img_convert_ctx;
       img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
       //------------------------------
       //read all packets
       while (av_read_frame(pFormatCtx, packet) >= 0)
       {
          if (packet->stream_index == videoindex)
          {
             type = pFormatCtx->streams[packet->stream_index]->codec->codec_type;
             stream_index = packet->stream_index;
             av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", stream_index);

             if (!pFrame)
             {
                ret = AVERROR(ENOMEM);
                break;
             }
             packet->dts = av_rescale_q_rnd(packet->dts,
                pFormatCtx->streams[stream_index]->time_base,
                pFormatCtx->streams[stream_index]->codec->time_base,
                (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
             packet->pts = av_rescale_q_rnd(packet->pts,
                pFormatCtx->streams[stream_index]->time_base,
                pFormatCtx->streams[stream_index]->codec->time_base,
                (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
             ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
             printf("Decode 1 Packet size:%d pts:%lld\n", packet->size, (long long)packet->pts);
             if (ret < 0)
             {
                printf("Decode Error.\n");
                av_frame_free(&pFrame);
                return -1;
             }
             if (got_picture)
             {
                // sws_scale converts pixel formats, e.g. RGB to YUV
                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                // now the YUV frame just needs to be encoded
                pFrameYUV->width = pFrame->width;
                pFrameYUV->height = pFrame->height;

                /*pFrameYUV->pts = av_frame_get_best_effort_timestamp(pFrameYUV);
                pFrameYUV->pict_type = AV_PICTURE_TYPE_NONE;*/

                enc_pkt.data = NULL;
                enc_pkt.size = 0;
                av_init_packet(&enc_pkt);
                enc_pkt.data = out_buffer;
                enc_pkt.size = length;

                // the frame fed to the encoder must be YUV420P, otherwise encoding fails
                ret = avcodec_encode_video2(ofmt_ctx->streams[stream_index]->codec, &enc_pkt, pFrameYUV, &enc_got_frame);
                printf("Encode 1 Packet size:%d pts:%lld\n", enc_pkt.size, (long long)enc_pkt.pts);
                if (ret == 0) // remember: 0 means success here; a negative value means failure
                {
                   fwrite(enc_pkt.data, enc_pkt.size, 1, fp_write); // dump the encoded H.264 to a file for testing
                   memcpy(szbuffer[recvcount].str, enc_pkt.data, enc_pkt.size);
                   szbuffer[recvcount].len = enc_pkt.size;
                   recvcount = (recvcount + 1) % ARRAYLENGTH;
                }
                if (ret < 0)
                {
                   printf("encode failed\n");
                   return -1;
                }
                if (!enc_got_frame)
                {
                   continue;
                }

                /* prepare packet for muxing */
                enc_pkt.stream_index = stream_index;
                enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts,
                   ofmt_ctx->streams[stream_index]->codec->time_base,
                   ofmt_ctx->streams[stream_index]->time_base,
                   (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts,
                   ofmt_ctx->streams[stream_index]->codec->time_base,
                   ofmt_ctx->streams[stream_index]->time_base,
                   (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                enc_pkt.duration = av_rescale_q(enc_pkt.duration,
                   ofmt_ctx->streams[stream_index]->codec->time_base,
                   ofmt_ctx->streams[stream_index]->time_base);
                av_log(NULL, AV_LOG_INFO, "Muxing frame %d\n", i);
                /* mux encoded frame */
                ret = av_write_frame(ofmt_ctx, &enc_pkt);
                if (ret < 0)
                {
                   printf("mux failed\n");
                   return -1;
                }

    #if OUTPUT_YUV420P
                int y_size = pCodecCtx->width * pCodecCtx->height;
                fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);     //Y
                fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
                fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
    #endif
                // SDL_YV12_OVERLAY expects V before U, hence the swapped planes
                SDL_LockYUVOverlay(bmp);
                bmp->pixels[0] = pFrameYUV->data[0];
                bmp->pixels[2] = pFrameYUV->data[1];
                bmp->pixels[1] = pFrameYUV->data[2];
                bmp->pitches[0] = pFrameYUV->linesize[0];
                bmp->pitches[2] = pFrameYUV->linesize[1];
                bmp->pitches[1] = pFrameYUV->linesize[2];
                SDL_UnlockYUVOverlay(bmp);
                rect.x = 0;
                rect.y = 0;
                rect.w = screen_w;
                rect.h = screen_h;
                SDL_DisplayYUVOverlay(bmp, &rect);
                //Delay 40ms
                SDL_Delay(40);
             }
          }
          av_free_packet(packet);
       }

       /* flush encoders */
       for (i = 0; i < 1; i++) {
          /* flush encoder */
          ret = flush_encoder(ofmt_ctx, i);
          if (ret < 0) {
             av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
             return -1;
          }
       }
       av_write_trailer(ofmt_ctx);

       sws_freeContext(img_convert_ctx);

    #if OUTPUT_YUV420P
       fclose(fp_yuv);
    #endif

       SDL_Quit();

       av_free(out_buffer);
       av_free(pFrameYUV);
       avcodec_close(pCodecCtx);
       avformat_close_input(&pFormatCtx);
       //fcloseall();
       return 0;
    }

    DWORD WINAPI ChildFunc2(LPVOID p)
    {
       // set up the live555 environment
       UsageEnvironment* env;
       Boolean reuseFirstSource = False; // if True, later clients see the same stream as the first client; otherwise playback restarts for each new client
       TaskScheduler* scheduler = BasicTaskScheduler::createNew();
       env = BasicUsageEnvironment::createNew(*scheduler);

       // create the RTSP server
       UserAuthenticationDatabase* authDB = NULL;
       RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB);
       if (rtspServer == NULL) {
          *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
          exit(1);
       }
       char const* descriptionString = "Session streamed by \"testOnDemandRTSPServer\"";

       // variables for the (simulated) live stream
       int datasize;           // length of the data area
       unsigned char* databuf; // pointer to the data area
       databuf = (unsigned char*)malloc(100000);
       bool dosent;            // send flag: true = keep sending, false = stop

       // In the original live555 demo this copied data from a file to simulate a
       // live feed; here it seeds the buffer from the newest ring slot instead.
       // For a real two-thread live setup, remember to add a lock here; the
       // per-frame copying actually happens in H264FramedLiveSource, so this
       // only passes the pointer down to it.
       datasize = szbuffer[(recvcount + ARRAYLENGTH - 1) % ARRAYLENGTH].len;
       for (int i = 0; i < datasize; ++i)
       {
          databuf[i] = szbuffer[(recvcount + ARRAYLENGTH - 1) % ARRAYLENGTH].str[i];
       }
       dosent = true;
       //fclose(pf);

       // Apart from the simulated input, everything above matches the stock
       // live555 demo. Below we switch to live streaming by passing our own
       // subsession class as addSubsession's first argument.
       char const* streamName = "h264test";
       ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
       sms->addSubsession(H264LiveVideoServerMediaSubssion::createNew(*env, reuseFirstSource, &datasize, databuf, &dosent)); // our own H264LiveVideoServerMediaSubssion
       rtspServer->addServerMediaSession(sms);
       announceStream(rtspServer, sms, streamName); // print the URL for the user

       env->taskScheduler().doEventLoop(); // event loop: blocks waiting for connections (without a client connected it never proceeds, which makes debugging tricky)

       free(databuf); // release the buffer
       return 0;
    }
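
    One caveat, which the comments in ChildFunc2 already hint at: szbuffer and recvcount are shared by both threads with no synchronization. A minimal sketch of the lock the code comments say should be added, using a Win32 critical section (my addition, not part of the original code):

    // Hypothetical synchronization around the shared ring buffer.
    #include <windows.h>

    CRITICAL_SECTION g_bufLock; // InitializeCriticalSection(&g_bufLock) once at startup

    // Encoder thread, when publishing a frame:
    //    EnterCriticalSection(&g_bufLock);
    //    memcpy(szbuffer[recvcount].str, enc_pkt.data, enc_pkt.size);
    //    szbuffer[recvcount].len = enc_pkt.size;
    //    recvcount = (recvcount + 1) % ARRAYLENGTH;
    //    LeaveCriticalSection(&g_bufLock);
    //
    // doGetNextFrame() in the live555 thread (shown next) would wrap its read
    // of the newest slot in the same Enter/LeaveCriticalSection pair.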

    Core code of the Source class:

    void H264FramedLiveSource::doGetNextFrame()
    {
       if (*Framed_dosent == true)
       {
          // read the most recently completed slot of the circular buffer
          bufsizel = szbuffer[(recvcount + ARRAYLENGTH - 1) % ARRAYLENGTH].len;
          if (bufsizel > fMaxSize)
          {
             fFrameSize = fMaxSize; // truncate to what live555 can accept
          }
          else
          {
             fFrameSize = bufsizel;
          }
          /* for (int i = 0; i < fFrameSize; ++i)
          {
             Framed_databuf[i] = szbuffer[(recvcount + ARRAYLENGTH - 1) % ARRAYLENGTH].str[i];
          } */
          memcpy(fTo, szbuffer[(recvcount + ARRAYLENGTH - 1) % ARRAYLENGTH].str, fFrameSize);
       }
       nextTask() = envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc*)FramedSource::afterGetting, this); // run afterGetting after a 0-second delay
       return;
    }
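
    Once both threads are up, announceStream() prints the stream's URL. With the defaults above (port 8554, stream name "h264test"), opening rtsp://<server-ip>:8554/h264test in VLC, or running ffplay against that same URL from another machine on the LAN, should show the camera feed.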

    Code download link:

    C:UsersDELLDesktopffmpeg开发files (leaving this here for now; I'll fix it later.)

  • Original post: https://www.cnblogs.com/zhuxuekui/p/4249183.html