    ffmpeg: transcoding a local file (Part 1)


    Goal: take a local file as input and transcode it into another local file. This covers mux-layer (container) transcoding, codec-layer transcoding, video pixel-format conversion, audio resampling and related features; the individual feature points are explained in the code comments. Note: the comments are important.
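
    The listing below contains ffmpeg.h followed by its implementation file. A minimal driver is sketched here first, assuming a main() that calls the exported routines in the order the implementation expects (demux init, mux init, the transcode loop, then teardown); this call order is inferred from the code that follows, and error handling is reduced to bare return-code checks.

    #include "ffmpeg.h"
    
    int main()
    {
    	// register muxers, demuxers and codecs (required by the FFmpeg API version used here)
    	av_register_all();
    
    	// open the input file and locate its video/audio streams
    	if (init_demux((char *)INPUTURL, &icodec) != 1)
    		return -1;
    
    	// build the output context, add streams, open codecs where needed, write the header
    	if (init_mux() != 1)
    		return -1;
    
    	// read packets and remux or transcode them
    	transcode();
    
    	// write the trailer, close the output, then release the input
    	uinit_mux();
    	uinit_demux();
    	return 0;
    }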


    #ifndef __FFMPEG_H__
    #define __FFMPEG_H__
    
    #include "info.h"
    
    extern "C"
    {
    #include "libavformat/avformat.h"
    #include "libavformat/avio.h"
    #include "libavcodec/avcodec.h"
    #include "libswscale/swscale.h"
    #include "libavutil/avutil.h"
    #include "libavutil/mathematics.h"
    #include "libswresample/swresample.h"
    #include "libavutil/opt.h"
    #include "libavutil/channel_layout.h"
    #include "libavutil/samplefmt.h"
    #include "libavdevice/avdevice.h"  //摄像头所用
    #include "libavfilter/avfilter.h"
    #include "libavutil/error.h"
    #include "libavutil/mathematics.h"  
    #include "libavutil/time.h"  
    #include "libavutil/fifo.h"
    #include "libavutil/audio_fifo.h"   //这里是做分片时候重採样编码音频用的
    #include "inttypes.h"
    #include "stdint.h"
    };
    
    #pragma comment(lib,"avformat.lib")
    #pragma comment(lib,"avcodec.lib")
    #pragma comment(lib,"avdevice.lib")
    #pragma comment(lib,"avfilter.lib")
    #pragma comment(lib,"avutil.lib")
    #pragma comment(lib,"postproc.lib")
    #pragma comment(lib,"swresample.lib")
    #pragma comment(lib,"swscale.lib")
    
    //#define INPUTURL   "../in_stream/22.flv" 
    //#define INPUTURL "../in_stream/闪电侠.The.Flash.S01E01.中英字幕.HDTVrip.624X352.mp4"
    //#define INPUTURL   "../in_stream/33.ts" 
    //#define INPUTURL   "../in_stream/22mp4.mp4" 
    //#define INPUTURL   "../in_stream/EYED0081.MOV" 
    //#define INPUTURL   "../in_stream/李荣浩 - 李白.mp3" 
    //#define INPUTURL   "../in_stream/avier1.mp4" 
    //#define INPUTURL   "../in_stream/分歧者2预告片.mp4" 
    //#define INPUTURL   "../in_stream/Class8简单介绍.m4v" 
    //#define INPUTURL   "../in_stream/22.ts" 
    //#define INPUTURL   "../in_stream/44.mp3"
    //#define INPUTURL   "../in_stream/ceshi.mp4"
    //#define INPUTURL   "../in_stream/33.mp4"
    #define INPUTURL   "../in_stream/father.avi"
    //#define INPUTURL   "../in_stream/西海情歌.wav" 
    //#define INPUTURL   "../in_stream/Furious_7_2015_International_Trailer_2_5.1-1080p-HDTN.mp4" 
    //#define INPUTURL   "../in_stream/Wildlife.wmv" 
    //#define INPUTURL   "../in_stream/单身男女2.HD1280超清国语版.mp4" 
    //#define INPUTURL     "rtmp://221.228.193.50:1935/live/teststream1" 
    #define OUTPUTURL  "../out_stream/father1111.mp4"
    //#define OUTPUTURL    "rtmp://221.228.193.50:1935/live/zwg"
    //#define OUTPUTURL    "rtmp://221.228.193.50:1935/live/zwg"
    
    enum AVSampleFormat_t 
    {
    	AV_SAMPLE_FMT_NONE_t = -1,
    	AV_SAMPLE_FMT_U8_t,          ///< unsigned 8 bits
    	AV_SAMPLE_FMT_S16_t,         ///< signed 16 bits
    	AV_SAMPLE_FMT_S32_t,         ///< signed 32 bits
    	AV_SAMPLE_FMT_FLT_t,         ///< float
    	AV_SAMPLE_FMT_DBL_t,         ///< double
    
    	AV_SAMPLE_FMT_U8P_t,         ///< unsigned 8 bits, planar
    	AV_SAMPLE_FMT_S16P_t,        ///< signed 16 bits, planar
    	AV_SAMPLE_FMT_S32P_t,        ///< signed 32 bits, planar
    	AV_SAMPLE_FMT_FLTP_t,        ///< float, planar
    	AV_SAMPLE_FMT_DBLP_t,        ///< double, planar
    
    	AV_SAMPLE_FMT_NB_t           ///< Number of sample formats. DO NOT USE if linking dynamically
    };
    
    /************************************************************************/
    /************************************************************************/
    // if defined, use AVAudioFifo for audio; otherwise use the global AVFifoBuffer (two alternative approaches)
    //#define AUDIO_FIFO
    /************************************************************************/
    /************************************************************************/
    
    
    //video param
    extern int m_dwWidth;
    extern int m_dwHeight;
    extern double m_dbFrameRate;  // frame rate
    extern AVCodecID video_codecID;
    extern AVPixelFormat video_pixelfromat;
    extern int gop_size;
    extern int max_b_frame;
    extern int thread_count; // number of CPU cores to use
    
    //audio param
    extern int m_dwChannelCount; // channel count
    extern AVSampleFormat_t m_dwBitsPerSample; // sample format
    extern int m_dwFrequency;     // sample rate
    extern AVCodecID audio_codecID;
    extern int audio_frame_size;
    extern int m_audiomuxtimebasetrue;  // whether the audio mux-layer time base is valid
    
    extern AVFifoBuffer * m_fifo; // FIFO holding decoded PCM data
    extern AVAudioFifo * m_audiofifo; // audio FIFO holding decoded PCM data
    extern int64_t m_first_audio_pts; // pts of the first audio frame
    extern int m_is_first_audio_pts; // whether the first audio pts has been recorded
    
    #define AUDIO_ID            0                                                 // stream index used in output packets: if the audio stream is added first, audio is 0 and video is 1, otherwise the reverse (must match the add_out_stream order)
    #define VIDEO_ID            1
    
    extern int nRet;                                                              // status/return code
    extern AVFormatContext* icodec;                                               // input format context
    extern AVFormatContext* ocodec ;                                              // output format context
    extern char szError[256];                                                     // error string buffer
    extern AVStream* ovideo_st;
    extern AVStream* oaudio_st;              
    extern int video_stream_idx;
    extern int audio_stream_idx;
    extern AVCodec *audio_codec;
    extern AVCodec *video_codec;
    extern AVPacket pkt;   
    extern AVBitStreamFilterContext * vbsf_aac_adtstoasc;                         // aac_adtstoasc (ADTS-to-ASC) bitstream filter
    static struct SwsContext * img_convert_ctx_video = NULL;
    static int sws_flags = SWS_BICUBIC; // scaling interpolation: bicubic
    
    
    int init_demux(char * Filename,AVFormatContext ** iframe_c);
    int init_mux();
    int uinit_demux();
    int uinit_mux();
    //for mux
    AVStream * add_out_stream(AVFormatContext* output_format_context,AVMediaType codec_type_t); 
    //for codec
    AVStream * add_out_stream2(AVFormatContext* output_format_context,AVMediaType codec_type_t,AVCodec **codec); 
    int init_decode(int stream_type);
    int init_code(int stream_type);
    int uinit_decode(int stream_type);
    int uinit_code(int stream_type);
    int perform_decode(int stream_type,AVFrame * picture);
    int perform_code(int stream_type,AVFrame * picture);   // uses the AVFifoBuffer path
    int perform_code2(int stream_type,AVFrame * picture);  // uses the AVAudioFifo path
    void perform_yuv_conversion(AVFrame * pinframe,AVFrame * poutframe);
    SwrContext * init_pcm_resample(AVFrame *in_frame, AVFrame *out_frame);
    void uinit_pcm_resample(AVFrame * poutframe,SwrContext * swr_ctx);
    int preform_pcm_resample(SwrContext * pSwrCtx,AVFrame *in_frame, AVFrame *out_frame);
    int audio_support(AVCodec * pCodec,int *channel,int * playout,int *samplePerSec,AVSampleFormat_t *sample_fmt);
    int video_support(AVCodec * pCodec,AVPixelFormat * video_pixelfromat);
    int transcode(); 
    void write_frame(AVFormatContext *ocodec,int ID,AVPacket pkt_t); // rescales the packet timestamps and writes the packet to the output file
    
    #endif
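
    // implementation file for the interface declared in ffmpeg.h above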

    #include "ffmpeg.h"
    
    int nRet = 0;
    AVFormatContext* icodec = NULL; 
    AVFormatContext* ocodec = NULL;
    char szError[256]; 
    AVStream * ovideo_st = NULL;
    AVStream * oaudio_st = NULL;
    int video_stream_idx = -1;
    int audio_stream_idx = -1;
    AVCodec *audio_codec = NULL;
    AVCodec *video_codec = NULL;
    AVPacket pkt;
    AVBitStreamFilterContext * vbsf_aac_adtstoasc = NULL;
    
    //video param
    int m_dwWidth = 640;
    int m_dwHeight = 480;
    double m_dbFrameRate = 23;  // frame rate
    AVCodecID video_codecID = AV_CODEC_ID_H264;
    AVPixelFormat video_pixelfromat = AV_PIX_FMT_YUV420P;
    int bit_rate = 400000;
    int gop_size = 12;
    int max_b_frame = 2;
    int thread_count = 2;
    
    //audio param
    int m_dwChannelCount = 2;      // channel count
    int m_dwFrequency = 48000;     // sample rate
    AVSampleFormat_t m_dwBitsPerSample = AV_SAMPLE_FMT_S16_t;    // sample format
    int m_audiomuxtimebasetrue = 1;  // whether the audio mux-layer time base is valid
    
    //aac
    AVCodecID audio_codecID = AV_CODEC_ID_AAC;
    int audio_frame_size  = 1024;
    
    //mp3
    //AVCodecID audio_codecID = AV_CODEC_ID_MP3;
    //int audio_frame_size  = 1152;
    
    AVFifoBuffer * m_fifo = NULL;
    AVAudioFifo * m_audiofifo  = NULL;
    int64_t m_first_audio_pts = 0; 
    int m_is_first_audio_pts = 0;
    
    int init_demux(char * Filename,AVFormatContext ** iframe_c)
    {
    	int i = 0;
    	nRet = avformat_open_input(iframe_c, Filename,NULL, NULL);
    	if (nRet != 0)
    	{
    		av_strerror(nRet, szError, 256);
    		printf("%s\n", szError);
    		printf("Call avformat_open_input function failed!\n");
    		return 0;
    	}
    	if (avformat_find_stream_info(*iframe_c,NULL) < 0)
    	{
    		printf("Call av_find_stream_info function failed!
    ");
    		return 0;
    	}
    	// dump the input file information
    	av_dump_format(*iframe_c, -1, Filename, 0);
    
    	// scan the input streams for the video and audio stream indices
    	for (i = 0; i < (*iframe_c)->nb_streams; i++)
    	{
    		if ((*iframe_c)->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
    		{
    			double FrameRate = (*iframe_c)->streams[i]->r_frame_rate.num /(double)(*iframe_c)->streams[i]->r_frame_rate.den;
    			m_dbFrameRate =(int)(FrameRate + 0.5); 
    			video_stream_idx = i;
    		}
    		else if ((*iframe_c)->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
    		{
    			audio_stream_idx = i;
    			if(icodec->streams[audio_stream_idx]->time_base.den == 0 ||
    				icodec->streams[audio_stream_idx]->time_base.num == 0 ||
    				icodec->streams[audio_stream_idx]->codec->sample_rate == 0)
    			{
    				m_audiomuxtimebasetrue = 0;
    			}
    		}
    	}
    
    	return 1;
    }
    
    int init_mux()
    {
    	int i = 0;
    	int ret = 0;
    	/* allocate the output media context */
    	avformat_alloc_output_context2(&ocodec, NULL,NULL, OUTPUTURL);
    	if (!ocodec) 
    	{
    		return getchar();
    	}
    	AVOutputFormat* ofmt = NULL;
    	ofmt = ocodec->oformat;
    
    	/* open the output file, if needed */
    	if (!(ofmt->flags & AVFMT_NOFILE))
    	{
    		if (avio_open(&ocodec->pb, OUTPUTURL, AVIO_FLAG_WRITE) < 0)
    		{
    			printf("Could not open '%s'
    ", OUTPUTURL);
    			return getchar();
    		}
    	}
    
    	// the order in which streams are added here must match AUDIO_ID/VIDEO_ID
    	// add the audio stream to the output context
    	if(audio_stream_idx != -1)// if an audio stream exists
    	{
    		ofmt->audio_codec = audio_codecID;
    		// if the audio needs to be decoded and re-encoded
    		if(audio_codecID != icodec->streams[audio_stream_idx]->codec->codec_id ||
    			1 != icodec->streams[audio_stream_idx]->codec->sample_fmt)
    		{
    			oaudio_st = add_out_stream2(ocodec, AVMEDIA_TYPE_AUDIO,&audio_codec);
    		}
    		else
    		{
    			oaudio_st = add_out_stream(ocodec, AVMEDIA_TYPE_AUDIO);
    		}
    		if ((strstr(ocodec->oformat->name, "flv") != NULL) || 
    			(strstr(ocodec->oformat->name, "mp4") != NULL) || 
    			(strstr(ocodec->oformat->name, "mov") != NULL) ||
    			(strstr(ocodec->oformat->name, "3gp") != NULL))    
    		{
    			if (oaudio_st->codec->codec_id == AV_CODEC_ID_AAC) 
    			{
    				vbsf_aac_adtstoasc =  av_bitstream_filter_init("aac_adtstoasc");  
    				if(vbsf_aac_adtstoasc == NULL)  
    				{  
    					return -1;  
    				} 
    			}
    		}
    	}
    
    
    	// add the video stream to the output context
    	if (video_stream_idx != -1)// if a video stream exists
    	{
    		ofmt->video_codec = video_codecID;
    		// if the video needs to be decoded and re-encoded
    		if(bit_rate != icodec->streams[video_stream_idx]->codec->bit_rate ||
    			m_dwWidth != icodec->streams[video_stream_idx]->codec->width ||
    			m_dwHeight != icodec->streams[video_stream_idx]->codec->height ||
    			video_codecID != icodec->streams[video_stream_idx]->codec->codec_id || 
    			m_dbFrameRate != av_q2d(icodec->streams[video_stream_idx]->r_frame_rate))
    		{
    			ovideo_st = add_out_stream2(ocodec, AVMEDIA_TYPE_VIDEO,&video_codec);
    		}
    		else
    		{
    			ovideo_st = add_out_stream(ocodec,AVMEDIA_TYPE_VIDEO);
    		}
    	}
    
    	av_dump_format(ocodec, 0, OUTPUTURL, 1);
    
    	if (video_stream_idx != -1)// if a video stream exists
    	{
    		// if the video needs to be decoded and re-encoded
    		if(bit_rate != icodec->streams[video_stream_idx]->codec->bit_rate ||
    			m_dwWidth != icodec->streams[video_stream_idx]->codec->width ||
    			m_dwHeight != icodec->streams[video_stream_idx]->codec->height ||
    			video_codecID != icodec->streams[video_stream_idx]->codec->codec_id || 
    			m_dbFrameRate != av_q2d(icodec->streams[video_stream_idx]->r_frame_rate))
    		{
    			// initialize the decoder
    			ret = init_decode(VIDEO_ID);
    			// initialize the encoder
    			ret = init_code(VIDEO_ID);
    		}
    	}
    	// if the audio needs to be decoded and re-encoded
    	if(audio_stream_idx != -1)// if an audio stream exists
    	{
    		if(audio_codecID != icodec->streams[audio_stream_idx]->codec->codec_id ||
    			1 != icodec->streams[audio_stream_idx]->codec->sample_fmt)
    		{
    			// initialize the decoder
    			ret = init_decode(AUDIO_ID);
    			// initialize the encoder
    			ret = init_code(AUDIO_ID);
    		}
    	}
    	ret = avformat_write_header(ocodec, NULL);
    	if (ret != 0)
    	{
    		printf("Call avformat_write_header function failed.
    ");
    		return 0;
    	}
    	return 1;
    }
    
    int uinit_demux()
    {
    	/* close the input file and free the format context */
    	avformat_close_input(&icodec);
    	return 1;
    }
    
    int uinit_mux()
    {
    	int i = 0;
    	nRet = av_write_trailer(ocodec);
    	if (nRet < 0)
    	{
    		av_strerror(nRet, szError, 256);
    		printf("%s\n", szError);
    		printf("Call av_write_trailer function failed\n");
    	}
    	if (vbsf_aac_adtstoasc !=NULL)
    	{
    		av_bitstream_filter_close(vbsf_aac_adtstoasc); 
    		vbsf_aac_adtstoasc = NULL;
    	}
    	av_dump_format(ocodec, -1, OUTPUTURL, 1); 
    
    	if (video_stream_idx != -1)// if a video stream exists
    	{
    		// if the video needs to be decoded and re-encoded
    		if(bit_rate != icodec->streams[video_stream_idx]->codec->bit_rate ||
    			m_dwWidth != icodec->streams[video_stream_idx]->codec->width ||
    			m_dwHeight != icodec->streams[video_stream_idx]->codec->height ||
    			video_codecID != icodec->streams[video_stream_idx]->codec->codec_id || 
    			m_dbFrameRate != av_q2d(icodec->streams[video_stream_idx]->r_frame_rate))
    		{
    			uinit_decode(VIDEO_ID);
    			uinit_code(VIDEO_ID);
    		}
    	}
    	if(audio_stream_idx != -1)// if an audio stream exists
    	{
    		// if the audio needs to be decoded and re-encoded
    		if(audio_codecID != icodec->streams[audio_stream_idx]->codec->codec_id ||
    			1 != icodec->streams[audio_stream_idx]->codec->sample_fmt)
    		{
    			uinit_decode(AUDIO_ID);
    			uinit_code(AUDIO_ID);
    		}
    	}
    	/* Free the streams. */
    	for (i = 0; i < ocodec->nb_streams; i++) 
    	{
    		av_freep(&ocodec->streams[i]->codec);
    		av_freep(&ocodec->streams[i]);
    	}
    	if (!(ocodec->oformat->flags & AVFMT_NOFILE))
    	{
    		/* Close the output file. */
    		avio_close(ocodec->pb);
    	}
    	av_free(ocodec);
    	return 1;
    }
    
    int init_decode(int stream_type)
    {
    	AVCodec *pcodec = NULL;
    	AVCodecContext *cctext = NULL;
    
    	if (stream_type == AUDIO_ID)
    	{
    		cctext = icodec->streams[audio_stream_idx]->codec;
    		pcodec = avcodec_find_decoder(cctext->codec_id);
    		if (!pcodec) 
    		{
    			return -1;
    		}
    	}
    	else if (stream_type == VIDEO_ID)
    	{
    		cctext = icodec->streams[video_stream_idx]->codec;
    		pcodec = avcodec_find_decoder(cctext->codec_id);
    		if (!pcodec) 
    		{
    			return -1;
    		}
    	}
    	// open the decoder
    	nRet = avcodec_open2(cctext, pcodec, NULL); 
    	if (nRet < 0)
    	{
    		printf("Could not open decoder
    ");
    		return -1;
    	}
    	return 1;
    }
    
    int init_code(int stream_type)
    {
    	AVCodecContext *cctext = NULL;
    
    	if (stream_type == AUDIO_ID)
    	{
    		cctext = oaudio_st->codec;
    		// open the encoder
    		nRet = avcodec_open2(cctext, audio_codec, NULL); 
    		if (nRet < 0)
    		{
    			printf("Could not open encoder
    ");
    			return 0;
    		}
    	}
    	else if (stream_type == VIDEO_ID)
    	{
    		cctext = ovideo_st->codec;
    		// open the encoder
    		nRet = avcodec_open2(cctext, video_codec, NULL); 
    		if (nRet < 0)
    		{
    			printf("Could not open encoder
    ");
    			return -1;
    		}
    	}
    	return 1;
    }
    
    int uinit_decode(int stream_type)
    {
    	AVCodecContext *cctext = NULL;
    
    	if (stream_type == AUDIO_ID)
    	{
    		cctext = icodec->streams[audio_stream_idx]->codec; // close the input audio decoder (the output encoder is closed in uinit_code)
    	}
    	else if (stream_type == VIDEO_ID)
    	{
    		cctext = icodec->streams[video_stream_idx]->codec; 
    	}
    	avcodec_close(cctext);
    	return 1;
    }
    
    int uinit_code(int stream_type)
    {
    	AVCodecContext *cctext = NULL;
    
    	if (stream_type == AUDIO_ID)
    	{
    		cctext = oaudio_st->codec; 
    	}
    	else if (stream_type == VIDEO_ID)
    	{
    		cctext = ovideo_st->codec; 
    	}
    	avcodec_close(cctext);
    	return 1;
    }
    
    AVStream * add_out_stream(AVFormatContext* output_format_context,AVMediaType codec_type_t)
    {
    	AVStream * in_stream = NULL;
    	AVStream * output_stream = NULL;
    	AVCodecContext* output_codec_context = NULL;
    
    	output_stream = avformat_new_stream(output_format_context,NULL);
    	if (!output_stream)
    	{
    		return NULL;
    	}
    
    	switch (codec_type_t)
    	{
    	case AVMEDIA_TYPE_AUDIO:
    		in_stream = icodec->streams[audio_stream_idx];
    		break;
    	case AVMEDIA_TYPE_VIDEO:
    		in_stream = icodec->streams[video_stream_idx];
    		break;
    	default:
    		break;
    	}
    
    	output_stream->id = output_format_context->nb_streams - 1;
    	output_codec_context = output_stream->codec;
    	output_stream->time_base  = in_stream->time_base;
    
    	int ret = 0;
    	ret = avcodec_copy_context(output_stream->codec, in_stream->codec);
    	if (ret < 0) 
    	{
    		printf("Failed to copy context from input to output stream codec context
    ");
    		return NULL;
    	}
    
    	// This is very important: for pure remuxing (no transcoding), writing the header fails without it,
    	// and when transcoding, the generated file has no preview image and adding the global header below fails.
    	// Setting codec_tag to 0 forces the extradata to be regenerated.
    	output_codec_context->codec_tag = 0; 
    
    	//if(! strcmp( output_format_context-> oformat-> name,  "mp4" ) ||
    	//!strcmp (output_format_context ->oformat ->name , "mov" ) ||
    	//!strcmp (output_format_context ->oformat ->name , "3gp" ) ||
    	//!strcmp (output_format_context ->oformat ->name , "flv"))
    	if(AVFMT_GLOBALHEADER & output_format_context->oformat->flags)
    	{
    		output_codec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
    	}
    	return output_stream;
    }
    
    AVStream * add_out_stream2(AVFormatContext* output_format_context,AVMediaType codec_type_t,AVCodec **codec)
    {
    	AVCodecContext* output_codec_context = NULL;
    	AVStream * in_stream = NULL;
    	AVStream * output_stream = NULL;
    	AVCodecID codecID;
    
    	switch (codec_type_t)
    	{
    	case AVMEDIA_TYPE_AUDIO:
    		codecID = audio_codecID;
    		in_stream = icodec->streams[audio_stream_idx];
    		break;
    	case AVMEDIA_TYPE_VIDEO:
    		codecID = video_codecID;
    		in_stream = icodec->streams[video_stream_idx];
    		break;
    	default:
    		break;
    	}
    
    	/* find the encoder */
    	*codec = avcodec_find_encoder(codecID);
    	if (!(*codec)) 
    	{
    		return NULL;
    	}
    
    	output_stream = avformat_new_stream(output_format_context,*codec);
    	if (!output_stream)
    	{
    		return NULL;
    	}
    
    	output_stream->id = output_format_context->nb_streams - 1;
    	output_codec_context = output_stream->codec;
    	output_stream->time_base  = in_stream->time_base;
    
    	switch (codec_type_t)
    	{
    	case AVMEDIA_TYPE_AUDIO:
    		output_codec_context->codec_id = audio_codecID;
    		output_codec_context->codec_type = codec_type_t;
    		output_stream->start_time = 0;
    		output_codec_context->sample_rate = icodec->streams[audio_stream_idx]->codec->sample_rate;//m_dwFrequency;
    		if(icodec->streams[audio_stream_idx]->codec->channels > 2)
    		{
    			output_codec_context->channels = m_dwChannelCount;
    			output_codec_context->channel_layout = av_get_default_channel_layout(m_dwChannelCount);
    		}
    		else
    		{
    			output_codec_context->channels  = icodec->streams[audio_stream_idx]->codec->channels;
    			if (icodec->streams[audio_stream_idx]->codec->channel_layout == 0)
    			{
    				output_codec_context->channel_layout = av_get_default_channel_layout(icodec->streams[audio_stream_idx]->codec->channels);
    			}
    			else
    			{
    				output_codec_context->channel_layout = icodec->streams[audio_stream_idx]->codec->channel_layout;
    			}
    		}
    		// some encoders do not accept very high bit rates; e.g. WAV PCM is 1411200 bps, more than ten times a typical AAC bit rate
    		output_codec_context->bit_rate = 128000;//icodec->streams[audio_stream_idx]->codec->bit_rate;
    		output_codec_context->frame_size = audio_frame_size;
    		output_codec_context->sample_fmt  = (AVSampleFormat)m_dwBitsPerSample; // sample format
    		output_codec_context->block_align = 0;
    		// check the channel layout, sample rate and sample format supported by the encoder
    		audio_support(*codec,&output_codec_context->channels,
    			(int *)&output_codec_context->channel_layout,
    			&output_codec_context->sample_rate,
    			(AVSampleFormat_t *)&output_codec_context->sample_fmt);
    		m_dwChannelCount = output_codec_context->channels;
    		m_dwFrequency = output_codec_context->sample_rate;
    		m_dwBitsPerSample = (AVSampleFormat_t)output_codec_context->sample_fmt;
    		break;
    	case AVMEDIA_TYPE_VIDEO:
    		AVRational r_frame_rate_t;
    		r_frame_rate_t.num = 100;
    		r_frame_rate_t.den = (int)(m_dbFrameRate * 100);
    		output_codec_context->time_base = in_stream->codec->time_base;
    		output_stream->time_base  = in_stream->time_base;
    		output_stream->r_frame_rate.num = r_frame_rate_t.den;
    		output_stream->r_frame_rate.den = r_frame_rate_t.num;
    		output_codec_context->codec_id = video_codecID;
    		output_codec_context->codec_type = codec_type_t;
    		output_stream->start_time = 0;
    		output_codec_context->pix_fmt = video_pixelfromat;
    		output_codec_context->width = m_dwWidth;
    		output_codec_context->height = m_dwHeight;
    		output_codec_context->bit_rate = bit_rate;
    		output_codec_context->gop_size  = gop_size;         /* emit one intra frame every twelve frames at most */
    		output_codec_context->max_b_frames = max_b_frame;	// maximum number of B-frames
    		output_codec_context->thread_count = thread_count;  // number of encoding threads
    		output_codec_context->me_range = 16;
    		output_codec_context->max_qdiff = 4;
    		output_codec_context->qmin = 20; // qmin/qmax trade quality against output size and speed: larger values mean less output data, faster encoding and lower quality
    		output_codec_context->qmax = 40; // see qmin above
    		output_codec_context->qcompress = 0.6;
    		// query the pixel formats supported by the encoder
    		video_support(*codec,&output_codec_context->pix_fmt);
    		video_pixelfromat = output_codec_context->pix_fmt;
    		break;
    	default:
    		break;
    	}
    	// This is very important: for pure remuxing (no transcoding), writing the header fails without it,
    	// and when transcoding, the generated file has no preview image and adding the global header below fails.
    	// Setting codec_tag to 0 forces the extradata to be regenerated.
    	output_codec_context->codec_tag = 0; 
    	//if(! strcmp( output_format_context-> oformat-> name,  "mp4" ) ||
    	//	!strcmp (output_format_context ->oformat ->name , "mov" ) ||
    	//	!strcmp (output_format_context ->oformat ->name , "3gp" ) ||
    	//	!strcmp (output_format_context ->oformat ->name , "flv" ))
    	if(AVFMT_GLOBALHEADER & output_format_context->oformat->flags)
    	{
    		output_codec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
    	}
    	return output_stream;
    }
    
    int perform_decode(int stream_type,AVFrame * picture)
    {
    	AVCodecContext *cctext = NULL;
    	int frameFinished = 0 ; 
    
    	if (stream_type == AUDIO_ID)
    	{
    		cctext = icodec->streams[audio_stream_idx]->codec;
    		avcodec_decode_audio4(cctext,picture,&frameFinished,&pkt);
    		if(frameFinished)
    		{
    			return 0;
    		}
    	}
    	else if (stream_type == VIDEO_ID)
    	{
    		cctext = icodec->streams[video_stream_idx]->codec;
    		avcodec_decode_video2(cctext,picture,&frameFinished,&pkt);
    		if(frameFinished)
    		{
    			return 0;
    		}
    	}
    	return 1;
    }
    
    int perform_code(int stream_type,AVFrame * picture)
    {
    	AVCodecContext *cctext = NULL;
    	AVPacket pkt_t;
    	av_init_packet(&pkt_t);
    	pkt_t.data = NULL; // packet data will be allocated by the encoder
    	pkt_t.size = 0;
    	int frameFinished = 0 ;
    
    	if (stream_type == AUDIO_ID)
    	{
    		cctext = oaudio_st->codec;
    		// if the input and output channel count, sample format or sample rate differ, the audio has to be resampled
    		if(icodec->streams[audio_stream_idx]->codec->sample_fmt != (AVSampleFormat)m_dwBitsPerSample ||
    			icodec->streams[audio_stream_idx]->codec->channels != m_dwChannelCount ||
    			icodec->streams[audio_stream_idx]->codec->sample_rate != m_dwFrequency)
    		{
    			int64_t pts_t = picture->pts;
    			int duration_t = 0;
    			if(m_audiomuxtimebasetrue == 0)
    			{
    				duration_t = (double)cctext->frame_size * (icodec->streams[audio_stream_idx]->codec->time_base.den /icodec->streams[audio_stream_idx]->codec->time_base.num)/ 
    					icodec->streams[audio_stream_idx]->codec->sample_rate;
    			}
    			else
    			{
    				duration_t = (double)cctext->frame_size * (icodec->streams[audio_stream_idx]->time_base.den /icodec->streams[audio_stream_idx]->time_base.num)/ 
    					icodec->streams[audio_stream_idx]->codec->sample_rate;
    			}
    
    			int frame_bytes = cctext->frame_size * av_get_bytes_per_sample(cctext->sample_fmt)* cctext->channels;
    			AVFrame * pFrameResample = avcodec_alloc_frame();
    			uint8_t * readbuff = new uint8_t[frame_bytes];
    
    			if(av_sample_fmt_is_planar(cctext->sample_fmt))
    			{
    				frame_bytes /= cctext->channels;
    			}
    
    			while (av_fifo_size(m_fifo) >= frame_bytes) // drain full frames from the FIFO
    			{
    				pFrameResample->nb_samples = cctext->frame_size;
    				av_fifo_generic_read(m_fifo, readbuff, frame_bytes, NULL);
    
    				// Planar audio has to be handled carefully here:
    				// for planar formats, the buf passed to avcodec_fill_audio_frame holds a single channel, while buf_size is the size of all channels combined;
    				// for packed (non-planar) formats, buf holds the interleaved channels and buf_size is again the size of all channels combined.
    				if(av_sample_fmt_is_planar(cctext->sample_fmt))
    				{
    					avcodec_fill_audio_frame(pFrameResample,cctext->channels,cctext->sample_fmt,readbuff,frame_bytes * cctext->channels,1);
    				}
    				else
    				{					
    					avcodec_fill_audio_frame(pFrameResample,cctext->channels,cctext->sample_fmt,readbuff,frame_bytes,0);
    				}
    
    				if(m_is_first_audio_pts == 0)
    				{
    					m_first_audio_pts = pts_t;
    					m_is_first_audio_pts = 1;
    				}
    				pFrameResample->pts = m_first_audio_pts;
    				m_first_audio_pts += duration_t;
    
    
    				pFrameResample->pts = av_rescale_q_rnd(pFrameResample->pts, icodec->streams[audio_stream_idx]->codec->time_base, oaudio_st->codec->time_base, AV_ROUND_NEAR_INF);
    				nRet = avcodec_encode_audio2(cctext,&pkt_t,pFrameResample,&frameFinished);
    				if (nRet>=0 && frameFinished)
    				{
    					write_frame(ocodec,AUDIO_ID,pkt_t);
    					av_free_packet(&pkt_t);
    				}
    			}
    			if (readbuff)
    			{
    				delete []readbuff;
    				readbuff = NULL;
    			}
    			if (pFrameResample)
    			{
    				av_free(pFrameResample);
    				pFrameResample = NULL;
    			}
    		}
    		else
    		{
    			nRet = avcodec_encode_audio2(cctext,&pkt_t,picture,&frameFinished);
    			if (nRet>=0 && frameFinished)
    			{
    				write_frame(ocodec,AUDIO_ID,pkt_t);
    				av_free_packet(&pkt_t);
    			}
    		}
    	}
    	else if (stream_type == VIDEO_ID)
    	{
    		cctext = ovideo_st->codec;
    		// the ticks_per_frame-based rescaling below was not reliable, so it is left commented out
    		//if(icodec->streams[video_stream_idx]->codec->ticks_per_frame != 1)
    		//{
    		//	AVRational time_base_video_t;
    		//	time_base_video_t.num = icodec->streams[video_stream_idx]->codec->time_base.num;
    		//	time_base_video_t.den = icodec->streams[video_stream_idx]->codec->time_base.den /icodec->streams[video_stream_idx]->codec->ticks_per_frame;
    		//	picture->pts = av_rescale_q_rnd(picture->pts, time_base_video_t, ovideo_st->codec->time_base, AV_ROUND_NEAR_INF);
    		//}
    		//else
    		//{
    		//	picture->pts = av_rescale_q_rnd(picture->pts, icodec->streams[video_stream_idx]->codec->time_base, ovideo_st->codec->time_base, AV_ROUND_NEAR_INF);
    		//}
    		picture->pts = av_rescale_q_rnd(picture->pts, icodec->streams[video_stream_idx]->codec->time_base, ovideo_st->codec->time_base, AV_ROUND_NEAR_INF);
    
    		avcodec_encode_video2(cctext,&pkt_t,picture,&frameFinished);
    		picture->pts++;
    		if (frameFinished)
    		{
    			write_frame(ocodec,VIDEO_ID,pkt_t);
    			av_free_packet(&pkt_t);
    		}
    	}
    	return 1;
    }
    
    int perform_code2(int stream_type,AVFrame * picture)
    {
    	AVCodecContext *cctext = NULL;
    	AVPacket pkt_t;
    	av_init_packet(&pkt_t);
    	pkt_t.data = NULL; // packet data will be allocated by the encoder
    	pkt_t.size = 0;
    	int frameFinished = 0 ;
    
    	if (stream_type == AUDIO_ID)
    	{
    		cctext = oaudio_st->codec;
    		// if the input and output channel count, sample format or sample rate differ, the audio has to be resampled
    		if(icodec->streams[audio_stream_idx]->codec->sample_fmt != (AVSampleFormat)m_dwBitsPerSample ||
    			icodec->streams[audio_stream_idx]->codec->channels != m_dwChannelCount ||
    			icodec->streams[audio_stream_idx]->codec->sample_rate != m_dwFrequency)
    		{
    			int64_t pts_t = picture->pts;
    			int duration_t = 0;
    			if(m_audiomuxtimebasetrue == 0)
    			{
    				duration_t = (double)cctext->frame_size * (icodec->streams[audio_stream_idx]->codec->time_base.den /icodec->streams[audio_stream_idx]->codec->time_base.num)/ 
    					icodec->streams[audio_stream_idx]->codec->sample_rate;
    			}
    			else
    			{
    				duration_t = (double)cctext->frame_size * (icodec->streams[audio_stream_idx]->time_base.den /icodec->streams[audio_stream_idx]->time_base.num)/ 
    					icodec->streams[audio_stream_idx]->codec->sample_rate;
    			}
    
    			AVFrame * pFrameResample = av_frame_alloc();
    
    			pFrameResample->nb_samples     = cctext->frame_size;
    			pFrameResample->channel_layout = cctext->channel_layout;
    			pFrameResample->channels = cctext->channels;
    			pFrameResample->format         = cctext->sample_fmt;
    			pFrameResample->sample_rate    = cctext->sample_rate;
    			int error = 0;
    			if ((error = av_frame_get_buffer(pFrameResample, 0)) < 0)
    			{
    				av_frame_free(&pFrameResample);
    				return error;
    			}
    
    			while (av_audio_fifo_size(m_audiofifo) >= pFrameResample->nb_samples) // drain full frames from the FIFO
    			{
    				av_audio_fifo_read(m_audiofifo,(void **)pFrameResample->data,pFrameResample->nb_samples);
    
    				if(m_is_first_audio_pts == 0)
    				{
    					m_first_audio_pts = pts_t;
    					m_is_first_audio_pts = 1;
    				}
    				pFrameResample->pts = m_first_audio_pts;
    				m_first_audio_pts += duration_t;
    
    
    				pFrameResample->pts = av_rescale_q_rnd(pFrameResample->pts, icodec->streams[audio_stream_idx]->codec->time_base, oaudio_st->codec->time_base, AV_ROUND_NEAR_INF);
    				nRet = avcodec_encode_audio2(cctext,&pkt_t,pFrameResample,&frameFinished);
    				if (nRet>=0 && frameFinished)
    				{
    					write_frame(ocodec,AUDIO_ID,pkt_t);
    					av_free_packet(&pkt_t);
    				}
    			}
    			if (pFrameResample)
    			{
    				av_frame_free(&pFrameResample);
    				pFrameResample = NULL;
    			}
    		}
    		else
    		{
    			nRet = avcodec_encode_audio2(cctext,&pkt_t,picture,&frameFinished);
    			if (nRet>=0 && frameFinished)
    			{
    				write_frame(ocodec,AUDIO_ID,pkt_t);
    				av_free_packet(&pkt_t);
    			}
    		}
    	}
    	else if (stream_type == VIDEO_ID)
    	{
    		cctext = ovideo_st->codec;
    		if(icodec->streams[video_stream_idx]->codec->ticks_per_frame != 1)
    		{
    			AVRational time_base_video_t;
    			time_base_video_t.num = icodec->streams[video_stream_idx]->codec->time_base.num;
    			time_base_video_t.den = icodec->streams[video_stream_idx]->codec->time_base.den /icodec->streams[video_stream_idx]->codec->ticks_per_frame;
    			picture->pts = av_rescale_q_rnd(picture->pts, time_base_video_t, ovideo_st->codec->time_base, AV_ROUND_NEAR_INF);
    		}
    		else
    		{
    			picture->pts = av_rescale_q_rnd(picture->pts, icodec->streams[video_stream_idx]->codec->time_base, ovideo_st->codec->time_base, AV_ROUND_NEAR_INF);
    		}
    		avcodec_encode_video2(cctext,&pkt_t,picture,&frameFinished);
    		picture->pts++;
    		if (frameFinished)
    		{
    			write_frame(ocodec,VIDEO_ID,pkt_t);
    			av_free_packet(&pkt_t);
    		}
    	}
    	return 1;
    }
    
    void perform_yuv_conversion(AVFrame * pinframe,AVFrame * poutframe)
    {
    	// set up the sws scaling context
    	if (img_convert_ctx_video == NULL)   
    	{
    		img_convert_ctx_video = sws_getContext(icodec->streams[video_stream_idx]->codec->width, icodec->streams[video_stream_idx]->codec->height, 
    			icodec->streams[video_stream_idx]->codec->pix_fmt,
    			m_dwWidth, m_dwHeight,
    			video_pixelfromat,
    			sws_flags, NULL, NULL, NULL);
    		if (img_convert_ctx_video == NULL)
    		{
    			printf("Cannot initialize the conversion context
    ");
    		}
    	}
    	// perform the conversion
    	sws_scale(img_convert_ctx_video, pinframe->data, pinframe->linesize,         
    		0, icodec->streams[video_stream_idx]->codec->height, poutframe->data, poutframe->linesize);
    	poutframe->pkt_pts = pinframe->pkt_pts;
    	poutframe->pkt_dts = pinframe->pkt_dts;
    	// pkt_pts and pkt_dts sometimes differ, and pkt_pts holds the pre-encoding dts; the AVFrame must be given pkt_dts here, not pkt_pts
    	//poutframe->pts = poutframe->pkt_pts;
    	poutframe->pts = pinframe->pkt_dts;
    }
    
    SwrContext * init_pcm_resample(AVFrame *in_frame, AVFrame *out_frame)
    {
    	SwrContext * swr_ctx = NULL;
    	swr_ctx = swr_alloc();
    	if (!swr_ctx)
    	{
    		printf("swr_alloc error 
    ");
    		return NULL;
    	}
    	AVCodecContext * audio_dec_ctx = icodec->streams[audio_stream_idx]->codec;
    	AVSampleFormat sample_fmt;
    	sample_fmt = (AVSampleFormat)m_dwBitsPerSample; // sample format
    	if (audio_dec_ctx->channel_layout == 0)
    	{
    		audio_dec_ctx->channel_layout = av_get_default_channel_layout(icodec->streams[audio_stream_idx]->codec->channels);
    	}
    	/* set options */
    	av_opt_set_int(swr_ctx, "in_channel_layout",    audio_dec_ctx->channel_layout, 0);
    	av_opt_set_int(swr_ctx, "in_sample_rate",       audio_dec_ctx->sample_rate, 0);
    	av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", audio_dec_ctx->sample_fmt, 0);
    	if(icodec->streams[audio_stream_idx]->codec->channels > 2)
    	{
    		av_opt_set_int(swr_ctx, "out_channel_layout",    av_get_default_channel_layout(m_dwChannelCount), 0);	
    	}
    	else
    	{
    		av_opt_set_int(swr_ctx, "out_channel_layout", audio_dec_ctx->channel_layout, 0);
    	}
    	av_opt_set_int(swr_ctx, "out_sample_rate",       audio_dec_ctx->sample_rate, 0);
    	av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", sample_fmt, 0);
    	swr_init(swr_ctx);
    
    	int64_t src_nb_samples = in_frame->nb_samples;
    	out_frame->nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx,oaudio_st->codec->sample_rate) + src_nb_samples,
    		oaudio_st->codec->sample_rate, oaudio_st->codec->sample_rate, AV_ROUND_UP);
    
    	int ret = av_samples_alloc(out_frame->data, &out_frame->linesize[0], 
    		icodec->streams[audio_stream_idx]->codec->channels, out_frame->nb_samples,oaudio_st->codec->sample_fmt,1);
    	if (ret < 0)
    	{
    		return NULL;
    	}
    
    #ifdef AUDIO_FIFO
    	m_audiofifo  = av_audio_fifo_alloc(oaudio_st->codec->sample_fmt, oaudio_st->codec->channels,
    		out_frame->nb_samples);
    #else
    	// initialize the FIFO used to split the PCM into encoder-sized frames
    	int buffersize = av_samples_get_buffer_size(NULL, oaudio_st->codec->channels,
    		2048, oaudio_st->codec->sample_fmt, 1);
    	m_fifo = av_fifo_alloc(buffersize);
    #endif
    	return swr_ctx;
    }
    
    int preform_pcm_resample(SwrContext * pSwrCtx,AVFrame *in_frame, AVFrame *out_frame)
    {
    	int ret = 0;
    	if (pSwrCtx != NULL) 
    	{
    		ret = swr_convert(pSwrCtx, out_frame->data, out_frame->nb_samples, 
    			(const uint8_t**)in_frame->data, in_frame->nb_samples);
    		if (ret < 0)
    		{
    			return -1;
    		}
    		// grow the FIFO to hold the newly converted samples
    		int buffersize = av_samples_get_buffer_size(&out_frame->linesize[0], oaudio_st->codec->channels,
    			ret, oaudio_st->codec->sample_fmt, 1);
    #ifdef AUDIO_FIFO
    		int sss = av_audio_fifo_size(m_audiofifo);
    		sss = av_audio_fifo_realloc(m_audiofifo, av_audio_fifo_size(m_audiofifo) + out_frame->nb_samples);
    		av_audio_fifo_write(m_audiofifo,(void **)out_frame->data,out_frame->nb_samples);
    		sss = av_audio_fifo_size(m_audiofifo);
    #else
    		int sss = av_fifo_size(m_fifo);
    		sss = av_fifo_realloc2(m_fifo, av_fifo_size(m_fifo) + out_frame->linesize[0]);
    		sss = av_fifo_size(m_fifo);
    		av_fifo_generic_write(m_fifo, out_frame->data[0], out_frame->linesize[0], NULL);
    #endif
    		out_frame->pkt_pts = in_frame->pkt_pts;
    		out_frame->pkt_dts = in_frame->pkt_dts;
    		// pkt_pts and pkt_dts sometimes differ, and pkt_pts holds the pre-encoding dts; the frame pts must be taken from pkt_dts here, not pkt_pts
    		//out_frame->pts = out_frame->pkt_pts;
    		out_frame->pts = in_frame->pkt_dts;
    	}
    	return 0;
    }
    
    void uinit_pcm_resample(AVFrame * poutframe,SwrContext * swr_ctx)
    {
    	if (poutframe)
    	{
    		avcodec_free_frame(&poutframe);
    		poutframe = NULL;
    	}
    	if (swr_ctx)
    	{
    		swr_free(&swr_ctx);
    		swr_ctx = NULL;
    	}
    #ifdef AUDIO_FIFO
    	if(m_audiofifo)
    	{
    		av_audio_fifo_free(m_audiofifo);
    		m_audiofifo = NULL;
    	}
    #else
    	// free the PCM splitting FIFO
    	if(m_fifo)
    	{
    		av_fifo_free(m_fifo);
    		m_fifo = NULL;
    	}
    #endif
    }
    
    
    int audio_support(AVCodec * pCodec,int *channel,int * playout,int *samplePerSec,AVSampleFormat_t * sample_fmt)
    {
    	// supported channel layouts
    	if(NULL != pCodec->channel_layouts)
    	{
    		uint64_t layout = av_get_default_channel_layout(*channel);
    		if(0 == layout)
    		{
    			return 0;
    		}
    
    		int i = 0;	
    		int j = 0;
    		while(0 != pCodec->channel_layouts[j])
    		{
    			printf("pCodec->channel_layouts[j] : %d
    ",pCodec->channel_layouts[j]);
    			++j;
    		}
    		while(0 != pCodec->channel_layouts[i])
    		{
    			if(layout == pCodec->channel_layouts[i])
    			{
    				break;
    			}
    			++i;
    		}
    		// not found: fall back to the last supported layout
    		if(0 == pCodec->channel_layouts[i])
    		{
    			*playout = pCodec->channel_layouts[i-1];
    			*channel = av_get_channel_layout_nb_channels(*playout);
    		}
    	}
    
    	// supported sample rates
    	if(NULL != pCodec->supported_samplerates)
    	{
    		int i = 0;
    		int j = 0;
    		while(0 != pCodec->supported_samplerates[j])
    		{
    			printf("pCodec->supported_samplerates[j] : %d
    ",pCodec->supported_samplerates[j]);
    			++j;
    		}
    		while(0 != pCodec->supported_samplerates[i])
    		{
    			if(*samplePerSec == pCodec->supported_samplerates[i])
    			{
    				break;
    			}
    			++i;
    		}
    		// not found: fall back to the last supported sample rate
    		if(0 == pCodec->supported_samplerates[i])
    		{
    			*samplePerSec = pCodec->supported_samplerates[i-1];
    		}
    	}
    
    	// supported sample formats
    	if(NULL != pCodec->sample_fmts)
    	{
    		int i = 0;
    		int j = 0;
    		while(-1 != pCodec->sample_fmts[j])
    		{
    			printf("pCodec->sample_fmts[j] : %d
    ",pCodec->sample_fmts[j]);
    			++j;
    		}
    		while(-1 != pCodec->sample_fmts[i])
    		{
    			if(*sample_fmt == pCodec->sample_fmts[i])
    			{
    				break;
    			}
    			++i;
    		}
    		// not found: fall back to the last supported sample format
    		if(-1 == pCodec->sample_fmts[i])
    		{
    			*sample_fmt = (AVSampleFormat_t)pCodec->sample_fmts[i-1];
    		}
    	}
    
    	return 1;
    }
    
    int video_support(AVCodec * pCodec,AVPixelFormat * video_pixelfromat)
    {
    	// supported pixel formats
    	if(NULL != pCodec->pix_fmts)
    	{
    		int i = 0;
    		int j = 0;
    		while(-1 != pCodec->pix_fmts[j]) // the pix_fmts list is terminated by -1 (AV_PIX_FMT_NONE)
    		{
    			printf("pCodec->pix_fmts[j] : %d
    ",pCodec->pix_fmts[j]);
    			++j;
    		}
    		while(-1 != pCodec->pix_fmts[i])
    		{
    			if(*video_pixelfromat == pCodec->pix_fmts[i])
    			{
    				break;
    			}
    			++i;
    		}
    		// not found: fall back to the last supported pixel format
    		if(-1 == pCodec->pix_fmts[i])
    		{
    			*video_pixelfromat = pCodec->pix_fmts[i-1];
    		}
    	}
    	return 1;
    }
    
    void write_frame(AVFormatContext *ocodec,int ID,AVPacket pkt_t)
    {
    	int64_t pts = 0, dts = 0;
    	int nRet = -1;
    
    	if(ID == VIDEO_ID)
    	{
    		AVPacket videopacket_t;
    		av_init_packet(&videopacket_t);
    
    		videopacket_t.pts = av_rescale_q_rnd(pkt_t.pts, icodec->streams[video_stream_idx]->time_base, ovideo_st->time_base, AV_ROUND_NEAR_INF);
    		videopacket_t.dts = av_rescale_q_rnd(pkt_t.dts, icodec->streams[video_stream_idx]->time_base, ovideo_st->time_base, AV_ROUND_NEAR_INF);
    		videopacket_t.duration = av_rescale_q(pkt_t.duration,icodec->streams[video_stream_idx]->time_base, ovideo_st->time_base);
    		videopacket_t.flags = pkt_t.flags;
    		videopacket_t.stream_index = VIDEO_ID; // depends on the add_out_stream order
    		videopacket_t.data = pkt_t.data;
    		videopacket_t.size = pkt_t.size;
    		nRet = av_interleaved_write_frame(ocodec, &videopacket_t);
    		if (nRet != 0)
    		{
    			printf("error av_interleaved_write_frame _ video
    ");
    		}
    		printf("video
    ");
    	}
    	else if(ID == AUDIO_ID)
    	{
    		AVPacket audiopacket_t;
    		av_init_packet(&audiopacket_t);
    
    		if(m_audiomuxtimebasetrue == 0)
    		{
    			audiopacket_t.pts = av_rescale_q_rnd(pkt_t.pts, icodec->streams[audio_stream_idx]->codec->time_base, oaudio_st->time_base, AV_ROUND_NEAR_INF);
    			audiopacket_t.dts = av_rescale_q_rnd(pkt_t.dts, icodec->streams[audio_stream_idx]->codec->time_base, oaudio_st->time_base, AV_ROUND_NEAR_INF);
    			audiopacket_t.duration = av_rescale_q(pkt_t.duration,icodec->streams[audio_stream_idx]->codec->time_base, oaudio_st->time_base);
    		}
    		else
    		{
    			audiopacket_t.pts = av_rescale_q_rnd(pkt_t.pts, icodec->streams[audio_stream_idx]->time_base, oaudio_st->time_base, AV_ROUND_NEAR_INF);
    			audiopacket_t.dts = av_rescale_q_rnd(pkt_t.dts, icodec->streams[audio_stream_idx]->time_base, oaudio_st->time_base, AV_ROUND_NEAR_INF);
    			audiopacket_t.duration = av_rescale_q(pkt_t.duration,icodec->streams[audio_stream_idx]->time_base, oaudio_st->time_base);
    		}
    		audiopacket_t.flags = pkt_t.flags;
    		audiopacket_t.stream_index = AUDIO_ID; // depends on the add_out_stream order
    		audiopacket_t.data = pkt_t.data;
    		audiopacket_t.size = pkt_t.size;
    
    		// apply the aac_adtstoasc bitstream filter for containers that need ASC-style AAC
    		if(! strcmp( ocodec->oformat-> name,  "mp4" ) ||
    			!strcmp (ocodec ->oformat ->name , "mov" ) ||
    			!strcmp (ocodec ->oformat ->name , "3gp" ) ||
    			!strcmp (ocodec ->oformat ->name , "flv" ))
    		{
    			if (oaudio_st->codec->codec_id == AV_CODEC_ID_AAC)
    			{
    				if (vbsf_aac_adtstoasc != NULL)
    				{
    					AVPacket filteredPacket = audiopacket_t; 
    					int a = av_bitstream_filter_filter(vbsf_aac_adtstoasc,                                           
    						oaudio_st->codec, NULL,&filteredPacket.data, &filteredPacket.size,
    						audiopacket_t.data, audiopacket_t.size, audiopacket_t.flags & AV_PKT_FLAG_KEY); 
    					if (a >  0)             
    					{                
    						av_free_packet(&audiopacket_t); 
    						filteredPacket.destruct = av_destruct_packet;  
    						audiopacket_t = filteredPacket;             
    					}   
    					else if (a == 0)
    					{
    						audiopacket_t = filteredPacket;   
    					}
    					else if (a < 0)            
    					{                
    						fprintf(stderr, "%s failed for stream %d, codec %s",
    							vbsf_aac_adtstoasc->filter->name,audiopacket_t.stream_index,oaudio_st->codec->codec ?  oaudio_st->codec->codec->name : "copy");
    						av_free_packet(&audiopacket_t);   
    
    					}
    				}
    			}
    		}
    		nRet = av_interleaved_write_frame(ocodec, &audiopacket_t);
    		if (nRet != 0)
    		{
    			printf("error av_interleaved_write_frame _ audio
    ");
    		}
    		printf("audio
    ");
    	}
    }
    
    int transcode()
    {
    	AVFrame *pinframe = NULL;
    	AVFrame * pout_video_frame = NULL;
    	AVFrame * pout_audio_frame = NULL;
    	SwrContext * swr_ctx = NULL;
    	int dst_nb_samples = -1;
    	int resampled_data_size = 0;
    	// allocate AVFrames and set default values
    	pinframe = avcodec_alloc_frame(); 
    	pout_video_frame = avcodec_alloc_frame();
    	pout_audio_frame = avcodec_alloc_frame();
    	pinframe->pts = 0;
    	pout_video_frame->pts = 0;
    	pout_audio_frame->pts = 0;
    	if (pinframe == NULL)
    	{
    		printf("avcodec_alloc_frame pinframe error
    ");
    		return 0;
    	}
    	//video
    	if (pout_video_frame == NULL)
    	{
    		printf("avcodec_alloc_frame pout_video_frame error
    ");
    		return 0;
    	}
    	int Out_size = avpicture_get_size(video_pixelfromat, m_dwWidth,m_dwHeight);
    	uint8_t * pOutput_buf =( uint8_t *)malloc(Out_size * 3 * sizeof(char)); // allocate generously so the buffer can hold any YUV format
    	avpicture_fill((AVPicture *)pout_video_frame, (unsigned char *)pOutput_buf, video_pixelfromat,m_dwWidth, m_dwHeight); // bind the buffer to the frame
    
    	//audio
    	if (pout_audio_frame == NULL)
    	{
    		printf("avcodec_alloc_frame pout_audio_frame error
    ");
    		return 0;
    	}
    	avcodec_get_frame_defaults(pout_audio_frame);
    
    	// start reading packets
    	while (1)
    	{
    		av_init_packet(&pkt);
    		if (av_read_frame(icodec, &pkt) < 0)
    		{
    			break;
    		}
    		// video packet
    		if(pkt.stream_index == video_stream_idx) 
    		{
    			// if the video needs to be decoded and re-encoded
    			if(bit_rate != icodec->streams[video_stream_idx]->codec->bit_rate ||
    				m_dwWidth != icodec->streams[video_stream_idx]->codec->width ||
    				m_dwHeight != icodec->streams[video_stream_idx]->codec->height ||
    				video_codecID != icodec->streams[video_stream_idx]->codec->codec_id || 
    				m_dbFrameRate != av_q2d(icodec->streams[video_stream_idx]->r_frame_rate))
    			{
    				nRet = perform_decode(VIDEO_ID,pinframe);
    				if (nRet == 0)
    				{
    					perform_yuv_conversion(pinframe,pout_video_frame);
    #ifdef AUDIO_FIFO
    					nRet = perform_code2(VIDEO_ID,pout_video_frame);
    #else
    					nRet = perform_code(VIDEO_ID,pout_video_frame);
    #endif
    				}
    			}
    			else
    			{
    				write_frame(ocodec,VIDEO_ID,pkt);
    			}
    		}
    		// audio packet
    		else if (pkt.stream_index == audio_stream_idx)
    		{
    			// if the audio needs to be decoded and re-encoded
    			if(audio_codecID != icodec->streams[audio_stream_idx]->codec->codec_id ||
    				1 != icodec->streams[audio_stream_idx]->codec->sample_fmt)
    			{
    				nRet = perform_decode(AUDIO_ID,pinframe);
    				if (nRet == 0)
    				{
    					// if the input and output channel count, sample format or sample rate differ, resampling is needed
    					if(icodec->streams[audio_stream_idx]->codec->sample_fmt != (AVSampleFormat)m_dwBitsPerSample ||
    						icodec->streams[audio_stream_idx]->codec->channels != m_dwChannelCount ||
    						icodec->streams[audio_stream_idx]->codec->sample_rate != m_dwFrequency)
    					{
    						if (swr_ctx == NULL)
    						{
    							swr_ctx = init_pcm_resample(pinframe,pout_audio_frame);
    						}
    						preform_pcm_resample(swr_ctx,pinframe,pout_audio_frame);
    #ifdef AUDIO_FIFO
    						perform_code2(AUDIO_ID,pout_audio_frame);
    #else
    						perform_code(AUDIO_ID,pout_audio_frame);
    #endif
    					}
    					else
    					{
    						pinframe->pts = pinframe->pkt_pts;
    #ifdef AUDIO_FIFO
    						perform_code2(AUDIO_ID,pinframe);
    #else
    						perform_code(AUDIO_ID,pinframe);
    #endif
    					}
    				}
    			}
    			else
    			{
    				write_frame(ocodec,AUDIO_ID,pkt);
    			}
    		}
    	}
    	if (pinframe)
    	{
    		avcodec_free_frame(&pinframe);
    		pinframe = NULL;
    	}
    	if (pout_video_frame)
    	{
    		avcodec_free_frame(&pout_video_frame);
    		pout_video_frame = NULL;
    	}
    	uinit_pcm_resample(pout_audio_frame,swr_ctx);
    	return 1;
    }
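
    Note that the read loop above exits as soon as av_read_frame() hits end of file, without draining frames still buffered inside the encoders (with max_b_frames set to 2, the video encoder in particular holds a few frames back). Below is a minimal sketch of how the video encoder could be flushed before calling uinit_mux(); it reuses the same avcodec_encode_video2()/write_frame() path as the code above, only applies when the video path is actually re-encoding, and the helper name flush_video_encoder is hypothetical, not part of the original source.

    static void flush_video_encoder()
    {
    	AVCodecContext * cctext = ovideo_st->codec;
    	AVPacket pkt_t;
    	int frameFinished = 0;
    
    	// keep feeding NULL frames until the encoder has no more delayed packets
    	while (1)
    	{
    		av_init_packet(&pkt_t);
    		pkt_t.data = NULL; // packet data will be allocated by the encoder
    		pkt_t.size = 0;
    		if (avcodec_encode_video2(cctext, &pkt_t, NULL, &frameFinished) < 0 || !frameFinished)
    			break;
    		write_frame(ocodec, VIDEO_ID, pkt_t);
    		av_free_packet(&pkt_t);
    	}
    }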

    Result:

     

    Source code: http://download.csdn.net/detail/zhuweigangzwg/9456782

    For discussion, join QQ group 62054820.
    QQ: 379969650.

