• Live555 Study Notes (6) ---------- Implementing Recording in Live555


      Live555 also provides a sample program for recording: playCommon.cpp in the testProgs directory (this file is the core of the openRTSP program). The basic principle of recording in Live555 is to create an RTSPClient that requests the stream at a given rtsp:// URL, then saves the received data to a file.
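      For reference, a typical recording invocation of the openRTSP program built from this code might look like the following, using its -F (output file name prefix), -P (file output interval, in seconds) and -d (receive duration, in seconds) options; the URL is a placeholder for your own camera or server:

    openRTSP -F cam1- -P 3600 -d 86400 rtsp://192.168.1.64:554/stream1

      This records for 24 hours, rotating to a new output file every hour, which is exactly the mechanism the rest of this post traces through the code.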

      Opening playCommon.cpp, you first see declarations of various global functions, then declarations of various global variables, and then main() followed by the implementations of those functions. main() begins, as usual, by creating the TaskScheduler and UsageEnvironment objects, then sets the various global variables according to the command-line arguments, and finally creates an RTSPClient object to request the video at the given rtsp:// URL.

    int main(int argc, char** argv)
    {
      // Begin by setting up our usage environment:
      TaskScheduler* scheduler = BasicTaskScheduler::createNew();
      env = BasicUsageEnvironment::createNew(*scheduler);

      /*
           Handle the various command-line options (omitted here)
      */
      streamURL = argv[1];

      // Create (or arrange to create) our client object:
      if (createHandlerServerForREGISTERCommand) {
        handlerServerForREGISTERCommand
          = HandlerServerForREGISTERCommand::createNew(*env, continueAfterClientCreation0,
                                   handlerServerForREGISTERCommandPortNum, authDBForREGISTER,
                                   verbosityLevel, progName);
        if (handlerServerForREGISTERCommand == NULL) {
          *env << "Failed to create a server for handling incoming \"REGISTER\" commands: " << env->getResultMsg() << "\n";
        } else {
          *env << "Awaiting an incoming \"REGISTER\" command on port " << handlerServerForREGISTERCommand->serverPortNum() << "\n";
        }
      } else {
        ourClient = createClient(*env, streamURL, verbosityLevel, progName);
        if (ourClient == NULL) {
          *env << "Failed to create " << clientProtocolName << " client: " << env->getResultMsg() << "\n";
          shutdown();
        }
        continueAfterClientCreation1();
      }

      // All subsequent activity takes place within the event loop:
      env->taskScheduler().doEventLoop(); // does not return

      return 0; // only to prevent compiler warning
    }

      The createClient function is defined in openRTSP.cpp and is very simple: it just calls RTSPClient::createNew to create an RTSPClient object, as sketched below.
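      In essence it boils down to the following sketch (simplified, not the verbatim source; the real function also passes along the port number used for RTSP-over-HTTP tunnelling):

    Medium* createClient(UsageEnvironment& env, char const* url,
                         int verbosityLevel, char const* applicationName) {
      // Create the RTSPClient and remember it in the global ourRTSPClient:
      return ourRTSPClient = RTSPClient::createNew(env, url, verbosityLevel, applicationName);
    }

      Next, let's look at the continueAfterClientCreation1 function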

    void continueAfterClientCreation1() {
      setUserAgentString(userAgent);

      if (sendOptionsRequest) {
        // Begin by sending an "OPTIONS" command:
        getOptions(continueAfterOPTIONS);       // send the OPTIONS command; this step can be skipped by sending DESCRIBE directly
      } else {
        continueAfterOPTIONS(NULL, 0, NULL);
      }
    }

    void getOptions(RTSPClient::responseHandler* afterFunc) {
      ourRTSPClient->sendOptionsCommand(afterFunc, ourAuthenticator);
    }

    void continueAfterOPTIONS(RTSPClient*, int resultCode, char* resultString) {
      if (sendOptionsRequestOnly) {
        if (resultCode != 0) {
          *env << clientProtocolName << " \"OPTIONS\" request failed: " << resultString << "\n";
        } else {
          *env << clientProtocolName << " \"OPTIONS\" request returned: " << resultString << "\n";
        }
        shutdown();
      }
      delete[] resultString;

      // Next, get a SDP description for the stream:
      getSDPDescription(continueAfterDESCRIBE);       // send the DESCRIBE command
    }

    void getSDPDescription(RTSPClient::responseHandler* afterFunc) {
      ourRTSPClient->sendDescribeCommand(afterFunc, ourAuthenticator);
    }

    void continueAfterDESCRIBE(RTSPClient*, int resultCode, char* resultString) {
      if (resultCode != 0) {
        *env << "Failed to get a SDP description for the URL \"" << streamURL << "\": " << resultString << "\n";
        delete[] resultString;
        shutdown();
      }

      char* sdpDescription = resultString;
      *env << "Opened URL \"" << streamURL << "\", returning a SDP description:\n" << sdpDescription << "\n";

      // Create a media session object from this SDP description:
      session = MediaSession::createNew(*env, sdpDescription);       // create the MediaSession
      delete[] sdpDescription;
      if (session == NULL) {
        *env << "Failed to create a MediaSession object from the SDP description: " << env->getResultMsg() << "\n";
        shutdown();
      } else if (!session->hasSubsessions()) {
        *env << "This session has no media subsessions (i.e., no \"m=\" lines)\n";
        shutdown();
      }

      // Then, setup the "RTPSource"s for the session:
      MediaSubsessionIterator iter(*session);
      MediaSubsession *subsession;
      Boolean madeProgress = False;
      char const* singleMediumToTest = singleMedium;
      while ((subsession = iter.next()) != NULL) {
        // If we've asked to receive only a single medium, then check this now:
        if (singleMediumToTest != NULL) {
          if (strcmp(subsession->mediumName(), singleMediumToTest) != 0) {
            *env << "Ignoring \"" << subsession->mediumName()
                 << "/" << subsession->codecName()
                 << "\" subsession, because we've asked to receive a single " << singleMedium
                 << " session only\n";
            continue;
          } else {
            // Receive this subsession only
            singleMediumToTest = "xxxxx";
            // this hack ensures that we get only 1 subsession of this type
          }
        }

        if (desiredPortNum != 0) {
          subsession->setClientPortNum(desiredPortNum);
          desiredPortNum += 2;
        }

        if (createReceivers) {                              // we receive the data and save it to a file, so createReceivers is true
          if (!subsession->initiate(simpleRTPoffsetArg)) {  // initialize the MediaSubsession: this creates the RTPSource, Groupsock and related resources
            *env << "Unable to create receiver for \"" << subsession->mediumName()
                 << "/" << subsession->codecName()
                 << "\" subsession: " << env->getResultMsg() << "\n";
          } else {
            *env << "Created receiver for \"" << subsession->mediumName()
                 << "/" << subsession->codecName() << "\" subsession (";
            if (subsession->rtcpIsMuxed()) {
              *env << "client port " << subsession->clientPortNum();
            } else {
              *env << "client ports " << subsession->clientPortNum()
                   << "-" << subsession->clientPortNum()+1;
            }
            *env << ")\n";
            madeProgress = True;

            if (subsession->rtpSource() != NULL) {
              // Because we're saving the incoming data, rather than playing
              // it in real time, allow an especially large time threshold
              // (1 second) for reordering misordered incoming packets:
              unsigned const thresh = 1000000; // 1 second
              subsession->rtpSource()->setPacketReorderingThresholdTime(thresh);

              // Set the RTP source's OS socket buffer size as appropriate - either if we were explicitly asked (using -B),
              // or if the desired FileSink buffer size happens to be larger than the current OS socket buffer size.
              // (The latter case is a heuristic, on the assumption that if the user asked for a large FileSink buffer size,
              // then the input data rate may be large enough to justify increasing the OS socket buffer size also.)
              int socketNum = subsession->rtpSource()->RTPgs()->socketNum();
              unsigned curBufferSize = getReceiveBufferSize(*env, socketNum);
              if (socketInputBufferSize > 0 || fileSinkBufferSize > curBufferSize) {
                unsigned newBufferSize = socketInputBufferSize > 0 ? socketInputBufferSize : fileSinkBufferSize;
                newBufferSize = setReceiveBufferTo(*env, socketNum, newBufferSize);
                if (socketInputBufferSize > 0) { // The user explicitly asked for the new socket buffer size; announce it:
                  *env << "Changed socket receive buffer size for the \""
                       << subsession->mediumName()
                       << "/" << subsession->codecName()
                       << "\" subsession from "
                       << curBufferSize << " to "
                       << newBufferSize << " bytes\n";
                }
              }
            }
          }
        } else {
          if (subsession->clientPortNum() == 0) {
            *env << "No client port was specified for the \""
                 << subsession->mediumName()
                 << "/" << subsession->codecName()
                 << "\" subsession.  (Try adding the \"-p <portNum>\" option.)\n";
          } else {
            madeProgress = True;
          }
        }
      }
      if (!madeProgress) shutdown();

      // Perform additional 'setup' on each subsession, before playing them:
      setupStreams();                    // send the SETUP command for each subsession
    }

      The flow above is essentially the same as the usual process by which an RTSPClient establishes a session with a server: first an OPTIONS command is sent, then DESCRIBE, then SETUP. Here too the OPTIONS step can be skipped, starting the exchange directly with DESCRIBE. The setupStreams function then sends a SETUP command for each MediaSubsession in turn; before looking at it, it's worth making the asynchronous callback pattern explicit.
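      Every send*Command() call in this chain returns immediately; the next step runs inside the response handler passed as an argument. Below is a minimal, self-contained sketch of the same continuation-passing pattern (the handler names are hypothetical, but the RTSPClient calls are the real API):

    #include "liveMedia.hh"
    #include "BasicUsageEnvironment.hh"

    void afterDESCRIBE(RTSPClient* client, int resultCode, char* resultString) {
      // ... build the MediaSession from resultString (the SDP), then send SETUP ...
      delete[] resultString;
    }

    void afterOPTIONS(RTSPClient* client, int resultCode, char* resultString) {
      delete[] resultString;
      client->sendDescribeCommand(afterDESCRIBE);   // chain to the next command
    }

    int main(int argc, char** argv) {
      if (argc < 2) return 1;
      TaskScheduler* scheduler = BasicTaskScheduler::createNew();
      UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
      RTSPClient* client = RTSPClient::createNew(*env, argv[1]);
      client->sendOptionsCommand(afterOPTIONS);     // or call sendDescribeCommand directly to skip OPTIONS
      env->taskScheduler().doEventLoop();           // the handlers above fire inside this loop
      return 0;
    }

      Now, the setupStreams function itself: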

    void setupStreams() {
      static MediaSubsessionIterator* setupIter = NULL;
      if (setupIter == NULL) setupIter = new MediaSubsessionIterator(*session);
      while ((subsession = setupIter->next()) != NULL) {
        // We have another subsession left to set up:
        if (subsession->clientPortNum() == 0) continue; // port # was not set

        setupSubsession(subsession, streamUsingTCP, forceMulticastOnUnspecified, continueAfterSETUP);  // send the SETUP command to set up this subsession
        return;
      }

      // We're done setting up subsessions.  // all subsessions have been set up successfully
      delete setupIter;
      if (!madeProgress) shutdown();

      // Create output files:
      if (createReceivers) {
        if (fileOutputInterval > 0) {
          createPeriodicOutputFiles();    // create periodic output files, e.g. one recording file per hour
        } else {
          createOutputFiles("");
        }
      }

      // Finally, start playing each subsession, to start the data flow:
      if (duration == 0) {
        if (scale > 0) duration = session->playEndTime() - initialSeekTime; // use SDP end time
        else if (scale < 0) duration = initialSeekTime;
      }
      if (duration < 0) duration = 0.0;

      endTime = initialSeekTime;
      if (scale > 0) {
        if (duration <= 0) endTime = -1.0f;
        else endTime = initialSeekTime + duration;
      } else {
        endTime = initialSeekTime - duration;
        if (endTime < 0) endTime = 0.0f;
      }

      // Send the PLAY command to request that playback begin:
      char const* absStartTime = initialAbsoluteSeekTime != NULL ? initialAbsoluteSeekTime : session->absStartTime();
      if (absStartTime != NULL) {
        // Either we or the server have specified that seeking should be done by 'absolute' time:
        startPlayingSession(session, absStartTime, session->absEndTime(), scale, continueAfterPLAY);
      } else {
        // Normal case: Seek by relative time (NPT):
        startPlayingSession(session, initialSeekTime, endTime, scale, continueAfterPLAY);
      }
    }

    void setupSubsession(MediaSubsession* subsession, Boolean streamUsingTCP, Boolean forceMulticastOnUnspecified, RTSPClient::responseHandler* afterFunc) {
      ourRTSPClient->sendSetupCommand(*subsession, afterFunc, False, streamUsingTCP, forceMulticastOnUnspecified, ourAuthenticator);
    }

    void startPlayingSession(MediaSession* session, double start, double end, float scale, RTSPClient::responseHandler* afterFunc) {
      printf("\n\n\n%f - %f\n\n\n", start, end);
      ourRTSPClient->sendPlayCommand(*session, afterFunc, start, end, scale, ourAuthenticator);
    }

      setupStreams sets up the subsessions one at a time: each call issues a SETUP for the next subsession and returns, and continueAfterSETUP calls setupStreams again until every subsession has been set up. Once they all have, createPeriodicOutputFiles is called to create the output recording files periodically, and the PLAY command is sent to request playback. Next, let's look at the createPeriodicOutputFiles function

    void createPeriodicOutputFiles() {
      // Create a filename suffix that notes the time interval that's being recorded:
      char periodicFileNameSuffix[100];
      snprintf(periodicFileNameSuffix, sizeof periodicFileNameSuffix, "-%05d-%05d",
               fileOutputSecondsSoFar, fileOutputSecondsSoFar + fileOutputInterval);
      createOutputFiles(periodicFileNameSuffix);              // create the output files

      // Schedule an event for writing the next output file:
      // (i.e., a task that will stop the current recording and start a new file)
      periodicFileOutputTask
        = env->taskScheduler().scheduleDelayedTask(fileOutputInterval*1000000,
                                   (TaskFunc*)periodicFileOutputTimerHandler,
                                   (void*)NULL);
    }

    void createOutputFiles(char const* periodicFilenameSuffix) {
      char outFileName[1000];

      // Create the appropriate sink(s) to receive and save the media data:
      if (outputQuickTimeFile || outputAVIFile) {
        if (periodicFilenameSuffix[0] == '\0') {
          // Normally (unless the '-P <interval-in-seconds>' option was given) we output to 'stdout':
          sprintf(outFileName, "stdout");
        } else {
          // Otherwise output to a type-specific file name, containing "periodicFilenameSuffix":
          char const* prefix = fileNamePrefix[0] == '\0' ? "output" : fileNamePrefix;
          snprintf(outFileName, sizeof outFileName, "%s%s.%s", prefix, periodicFilenameSuffix,
                   outputAVIFile ? "avi" : generateMP4Format ? "mp4" : "mov");
        }

        if (outputQuickTimeFile) {
          qtOut = QuickTimeFileSink::createNew(*env, *session, outFileName,
                                               fileSinkBufferSize,
                                               movieWidth, movieHeight,
                                               movieFPS,
                                               packetLossCompensate,
                                               syncStreams,
                                               generateHintTracks,
                                               generateMP4Format);
          if (qtOut == NULL) {
            *env << "Failed to create a \"QuickTimeFileSink\" for outputting to \""
                 << outFileName << "\": " << env->getResultMsg() << "\n";
            shutdown();
          } else {
            *env << "Outputting to the file: \"" << outFileName << "\"\n";
          }

          qtOut->startPlaying(sessionAfterPlaying, NULL);
        } else { // outputAVIFile
          aviOut = AVIFileSink::createNew(*env, *session, outFileName,
                                          fileSinkBufferSize,
                                          movieWidth, movieHeight,
                                          movieFPS,
                                          packetLossCompensate);
          if (aviOut == NULL) {
            *env << "Failed to create an \"AVIFileSink\" for outputting to \""
                 << outFileName << "\": " << env->getResultMsg() << "\n";
            shutdown();
          } else {
            *env << "Outputting to the file: \"" << outFileName << "\"\n";
          }

          aviOut->startPlaying(sessionAfterPlaying, NULL);
        }
      } else {   // I save the recording directly as a .h264 file, so this else branch is the one taken
        // Create and start "FileSink"s for each subsession:
        madeProgress = False;
        MediaSubsessionIterator iter(*session);
        while ((subsession = iter.next()) != NULL) {
          if (subsession->readSource() == NULL) continue; // was not initiated

          // Create an output file for each desired stream:
          if (singleMedium == NULL || periodicFilenameSuffix[0] != '\0') {
            // Output file name is
            //     "<filename-prefix><medium_name>-<codec_name>-<counter><periodicFilenameSuffix>"
            static unsigned streamCounter = 0;
            snprintf(outFileName, sizeof outFileName, "%s%s-%s-%d%s",
                     fileNamePrefix, subsession->mediumName(),
                     subsession->codecName(), ++streamCounter, periodicFilenameSuffix);
          } else {
            // When outputting a single medium only, we output to 'stdout'
            // (unless the '-P <interval-in-seconds>' option was given):
            sprintf(outFileName, "stdout");
          }

          FileSink* fileSink = NULL;
          Boolean createOggFileSink = False; // by default
          if (strcmp(subsession->mediumName(), "video") == 0) {
            if (strcmp(subsession->codecName(), "H264") == 0) {   // create a H264VideoFileSink to receive and save the H.264 video data
              // For H.264 video stream, we use a special sink that adds 'start codes',
              // and (at the start) the SPS and PPS NAL units:
              fileSink = H264VideoFileSink::createNew(*env, outFileName,
                                                      subsession->fmtp_spropparametersets(),
                                                      fileSinkBufferSize, oneFilePerFrame);
            } else if (strcmp(subsession->codecName(), "H265") == 0) {
              // For H.265 video stream, we use a special sink that adds 'start codes',
              // and (at the start) the VPS, SPS, and PPS NAL units:
              fileSink = H265VideoFileSink::createNew(*env, outFileName,
                                                      subsession->fmtp_spropvps(),
                                                      subsession->fmtp_spropsps(),
                                                      subsession->fmtp_sproppps(),
                                                      fileSinkBufferSize, oneFilePerFrame);
            } else if (strcmp(subsession->codecName(), "THEORA") == 0) {
              createOggFileSink = True;
            }
          } else if (strcmp(subsession->mediumName(), "audio") == 0) {
            if (strcmp(subsession->codecName(), "AMR") == 0 ||
                strcmp(subsession->codecName(), "AMR-WB") == 0) {
              // For AMR audio streams, we use a special sink that inserts AMR frame hdrs:
              fileSink = AMRAudioFileSink::createNew(*env, outFileName,
                                                     fileSinkBufferSize, oneFilePerFrame);
            } else if (strcmp(subsession->codecName(), "VORBIS") == 0 ||
                       strcmp(subsession->codecName(), "OPUS") == 0) {
              createOggFileSink = True;
            }
          }
          if (createOggFileSink) {
            fileSink = OggFileSink
              ::createNew(*env, outFileName,
                          subsession->rtpTimestampFrequency(), subsession->fmtp_config());
          } else if (fileSink == NULL) {
            // Normal case: none of the special cases above apply,
            // so create a plain FileSink to receive and save the data:
            fileSink = FileSink::createNew(*env, outFileName,
                                           fileSinkBufferSize, oneFilePerFrame);
          }
          subsession->sink = fileSink;

          if (subsession->sink == NULL) {
            *env << "Failed to create FileSink for \"" << outFileName
                 << "\": " << env->getResultMsg() << "\n";
          } else {
            if (singleMedium == NULL) {
              *env << "Created output file: \"" << outFileName << "\"\n";
            } else {
              *env << "Outputting data from the \"" << subsession->mediumName()
                   << "/" << subsession->codecName()
                   << "\" subsession to \"" << outFileName << "\"\n";
            }

            if (strcmp(subsession->mediumName(), "video") == 0 &&
                strcmp(subsession->codecName(), "MP4V-ES") == 0 &&
                subsession->fmtp_config() != NULL) {
              // For MPEG-4 video RTP streams, the 'config' information
              // from the SDP description contains useful VOL etc. headers.
              // Insert this data at the front of the output file:
              unsigned configLen;
              unsigned char* configData
                = parseGeneralConfigStr(subsession->fmtp_config(), configLen);
              struct timeval timeNow;
              gettimeofday(&timeNow, NULL);
              fileSink->addData(configData, configLen, timeNow);
              delete[] configData;
            }

            subsession->sink->startPlaying(*(subsession->readSource()),   // start receiving and saving data
                                           subsessionAfterPlaying,
                                           subsession);

            // Also set a handler to be called if a RTCP "BYE" arrives
            // for this subsession:
            if (subsession->rtcpInstance() != NULL) {
              subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, subsession);
            }

            madeProgress = True;
          }
        }
        if (!madeProgress) shutdown();
      }
    }

    // One recording file is finished: close its resources and start the next file.
    void periodicFileOutputTimerHandler(void* /*clientData*/) {
      fileOutputSecondsSoFar += fileOutputInterval;

      // First, close the existing output files:
      closeMediaSinks();

      // Then, create new output files:
      createPeriodicOutputFiles();
    }

    void closeMediaSinks() {          // close the FileSinks and release their resources
      Medium::close(qtOut); qtOut = NULL;
      Medium::close(aviOut); aviOut = NULL;

      if (session == NULL) return;
      MediaSubsessionIterator iter(*session);
      MediaSubsession* subsession;
      while ((subsession = iter.next()) != NULL) {
        Medium::close(subsession->sink);
        subsession->sink = NULL;
      }
    }
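      To make the rotation concrete: with fileOutputInterval set to 3600 and no file name prefix, the first hour of an H.264 video stream goes into a file named video-H264-1-00000-03600. When the timer fires, periodicFileOutputTimerHandler closes that file via closeMediaSinks and calls createPeriodicOutputFiles again, producing video-H264-2-03600-07200 for the second hour (the static streamCounter keeps incrementing across rotations), and so on.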

      So a recording file is created first, then a FileSink is created to receive the recorded data from the server and save it; when the specified recording interval has elapsed, the current recording is stopped and the next file is started. In this way we get one recording file per fixed time interval. In my case the stream is H.264-encoded video, so an H264VideoFileSink is created to receive and save the data; H264VideoFileSink is a subclass of H264or5VideoFileSink, which in turn is a subclass of FileSink. A FileSink pulls its data from the MediaSubsession's FramedSource and writes it to the file. Let's walk through the file-saving path.
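      The "pull" half of this loop lives in FileSink::continuePlaying, which is essentially the following (from FileSink.cpp): it asks the upstream FramedSource for one frame and registers afterGettingFrame as the completion callback:

    Boolean FileSink::continuePlaying() {
      if (fSource == NULL) return False;

      // Request one frame from the source; when it has been delivered into
      // fBuffer, a static trampoline forwards to the member function
      // afterGettingFrame() shown below:
      fSource->getNextFrame(fBuffer, fBufferSize,
                            afterGettingFrame, this,
                            onSourceClosure, this);
      return True;
    }

      Each delivered frame then arrives in afterGettingFrame, where the actual file writing happens: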

    void H264or5VideoFileSink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime) {
      unsigned char const start_code[4] = {0x00, 0x00, 0x00, 0x01};   // each H.264 NAL unit in the file is preceded by the start code 0x00000001

      if (!fHaveWrittenFirstFrame) {
        // If we have NAL units encoded in "sprop parameter strings", prepend these to the file:
        for (unsigned j = 0; j < 3; ++j) {
          unsigned numSPropRecords;
          SPropRecord* sPropRecords
            = parseSPropParameterSets(fSPropParameterSetsStr[j], numSPropRecords);
          for (unsigned i = 0; i < numSPropRecords; ++i) {
            addData(start_code, 4, presentationTime);
            addData(sPropRecords[i].sPropBytes, sPropRecords[i].sPropLength, presentationTime);
          }
          delete[] sPropRecords;
        }
        fHaveWrittenFirstFrame = True; // for next time
      }

      // Write the input data to the file, with the start code in front:
      addData(start_code, 4, presentationTime);       // first write the start code that begins each frame
      // Call the parent class to complete the normal file write with the input data
      // (FileSink::afterGettingFrame writes the frame's image data to the file):
      FileSink::afterGettingFrame(frameSize, numTruncatedBytes, presentationTime);
    }

    void FileSink::addData(unsigned char const* data, unsigned dataSize,
                           struct timeval presentationTime) {
      if (fPerFrameFileNameBuffer != NULL && fOutFid == NULL) {
        // Special case: Open a new file on-the-fly for this frame
        if (presentationTime.tv_usec == fPrevPresentationTime.tv_usec &&
            presentationTime.tv_sec == fPrevPresentationTime.tv_sec) {
          // The presentation time is unchanged from the previous frame, so we add a 'counter'
          // suffix to the file name, to distinguish them:
          sprintf(fPerFrameFileNameBuffer, "%s-%lu.%06lu-%u", fPerFrameFileNamePrefix,
                  presentationTime.tv_sec, presentationTime.tv_usec, ++fSamePresentationTimeCounter);
        } else {
          sprintf(fPerFrameFileNameBuffer, "%s-%lu.%06lu", fPerFrameFileNamePrefix,
                  presentationTime.tv_sec, presentationTime.tv_usec);
          fPrevPresentationTime = presentationTime; // for next time
          fSamePresentationTimeCounter = 0; // for next time
        }
        fOutFid = OpenOutputFile(envir(), fPerFrameFileNameBuffer);
      }

      // Write to our file:
    #ifdef TEST_LOSS
      static unsigned const framesPerPacket = 10;
      static unsigned frameCount = 0;
      static Boolean packetIsLost;
      if ((frameCount++)%framesPerPacket == 0) {
        packetIsLost = (our_random()%10 == 0); // simulate 10% packet loss #####
      }

      if (!packetIsLost)
    #endif
      if (fOutFid != NULL && data != NULL) {
        fwrite(data, 1, dataSize, fOutFid);          // write the data to the file
      }
    }

    void FileSink::afterGettingFrame(unsigned frameSize,
                                     unsigned numTruncatedBytes,
                                     struct timeval presentationTime) {
      if (numTruncatedBytes > 0) {
        envir() << "FileSink::afterGettingFrame(): The input frame data was too large for our buffer size ("
                << fBufferSize << ").  "
                << numTruncatedBytes << " bytes of trailing data was dropped!  Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call to at least "
                << fBufferSize + numTruncatedBytes << "\n";
      }
      addData(fBuffer, frameSize, presentationTime);        // write the frame data to the file

      if (fOutFid == NULL || fflush(fOutFid) == EOF) {
        // The output file has closed.  Handle this the same way as if the input source had closed:
        if (fSource != NULL) fSource->stopGettingFrames();
        onSourceClosure();
        return;
      }

      if (fPerFrameFileNameBuffer != NULL) {
        if (fOutFid != NULL) { fclose(fOutFid); fOutFid = NULL; }
      }

      // Then try getting the next frame:
      continuePlaying();                // request the next frame
    }

      So after H264VideoFileSink obtains a frame from the FramedSource, it first writes the H.264 start code, then the frame data, and then requests the next frame; repeating this, the incoming video data is continuously written into the recording file.
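      A quick way to check the result is to scan the recorded file for the start codes that H264VideoFileSink inserted. The following standalone program (the file name "test.h264" is a placeholder) walks an Annex-B file and prints the type of each NAL unit; in a healthy recording you should see an SPS (type 7) and a PPS (type 8) at the very beginning, exactly as written by the fHaveWrittenFirstFrame branch above:

    #include <cstdio>
    #include <vector>

    int main() {
      FILE* f = fopen("test.h264", "rb");
      if (f == NULL) { perror("fopen"); return 1; }

      // Read the whole file into memory (fine for a quick check):
      std::vector<unsigned char> buf;
      int c;
      while ((c = fgetc(f)) != EOF) buf.push_back((unsigned char)c);
      fclose(f);

      // Look for the 4-byte start code 0x00000001; the 5 low bits of the
      // byte that follows it give the H.264 NAL unit type:
      for (size_t i = 0; i + 4 < buf.size(); ++i) {
        if (buf[i] == 0 && buf[i+1] == 0 && buf[i+2] == 0 && buf[i+3] == 1) {
          unsigned nalType = buf[i+4] & 0x1F;
          printf("offset %zu: NAL unit type %u%s\n", i, nalType,
                 nalType == 7 ? " (SPS)" : nalType == 8 ? " (PPS)" :
                 nalType == 5 ? " (IDR slice)" : "");
        }
      }
      return 0;
    }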

• Original article: https://www.cnblogs.com/jqctop1/p/4469374.html