• Code that uses ffmpeg to encode BMP images into an x264 (H.264) video file, save an H.264 video as BMP images, and save a YUV video file as image files


      Using the open-source ffmpeg library, the first program encodes BMP images into an x264 (H.264) file, and the second decodes that H.264 file back into BMP files.

    It also covers saving a YUV-format video file as image files; output formats such as PNG, JPG and GIF all tested OK.


      The test environment was set up in VS2010, based on the code from the referenced blog. (Resource download)

       Full code:

    #define _AFXDLL
    #include <afxwin.h>
    #include <stdio.h>
    #include <stdlib.h>
    #ifdef  __cplusplus
    extern "C" {
    #endif
    
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>
    	int main()
    	{
    		CFile file[5];
    		BYTE *szTxt[5];
    
    		int nWidth = 0;
    		int nHeight= 0;
    
    		int nDataLen=0;
    
    		int nLen;
    
    		CString csFileName;
    		for (int fileI = 1; fileI <= 5; fileI ++)
    		{
    			csFileName.Format("%d.bmp", fileI);
    			file[fileI - 1].Open(csFileName,CFile::modeRead | CFile::typeBinary);
    			nLen = file[fileI - 1].GetLength();
    
    			szTxt[fileI -1] = new BYTE[nLen];
    			file[fileI - 1].Read(szTxt[fileI - 1], nLen);
    			file[fileI - 1].Close();
    
    			//BMP bmi;//BITMAPINFO bmi;
    			//int nHeadLen = sizeof(BMP);
    			BITMAPFILEHEADER bmpFHeader;
    			BITMAPINFOHEADER bmpIHeader;
    			memcpy(&bmpFHeader,szTxt[fileI -1],sizeof(BITMAPFILEHEADER));
    
    			int nHeadLen = bmpFHeader.bfOffBits - sizeof(BITMAPFILEHEADER);
    			memcpy(&bmpIHeader,szTxt[fileI - 1]+sizeof(BITMAPFILEHEADER),nHeadLen);
    
    			nWidth = bmpIHeader.biWidth;// 464;// bmi.bmpInfo.bmiHeader.biWidth;// ;
    			nHeight = bmpIHeader.biHeight;//362;// bmi.bmpInfo.bmiHeader.biHeight;// ;
    
    			szTxt[fileI - 1] += bmpFHeader.bfOffBits;
    			nDataLen = nLen-bmpFHeader.bfOffBits;
    		}
    		getchar();
    		av_register_all();
    		avcodec_register_all();
    		AVFrame *m_pRGBFrame = new AVFrame[1];   // RGB frame data
    		AVFrame *m_pYUVFrame = new AVFrame[1];   // YUV frame data
    		AVCodecContext *c = NULL;
    		AVCodecContext *in_c = NULL;
    		AVCodec *pCodecH264;                     // encoder
    		uint8_t *yuv_buff;
    
    		// look up the H.264 encoder
    		pCodecH264 = avcodec_find_encoder(CODEC_ID_H264);
    		if(!pCodecH264)
    		{
    			fprintf(stderr, "h264 codec not found\n");
    			getchar();
    			exit(1);
    		}
    
    		c= avcodec_alloc_context3(pCodecH264);
    		c->bit_rate = 3000000;// put sample parameters 
    		c->width =nWidth;// 
    		c->height = nHeight;// 
    
    		// frames per second 
    		AVRational rate;
    		rate.num = 1;
    		rate.den = 25;
    		c->time_base= rate;//(AVRational){1,25};
    		c->gop_size = 10; // emit one intra frame every ten frames 
    		c->max_b_frames=1;
    		c->thread_count = 1;
    		c->pix_fmt = PIX_FMT_YUV420P;//PIX_FMT_RGB24;
    
    		//av_opt_set(c->priv_data, /*"preset"*/"libvpx-1080p.ffpreset", /*"slow"*/NULL, 0);
    		// open the encoder
    		if (avcodec_open2(c, pCodecH264, NULL) < 0){
    			printf("avcodec_open2 failed\n");
    			TRACE("could not open the encoder");
    			getchar();
    		}
    
    		int size = c->width * c->height;
    
    		yuv_buff = (uint8_t *) malloc((size * 3) / 2); // size for YUV 420 
    
    		// buffer holding the RGB image data of the current frame
    		uint8_t * rgb_buff = new uint8_t[nDataLen];
    
    		// encoded output buffer: if outbuf_size is too small, encoding reports errors and picture quality suffers
    		int outbuf_size = 900000;
    		uint8_t * outbuf= (uint8_t*)malloc(outbuf_size); 
    		int u_size = 0;
    		FILE *f=NULL; 
    		char * filename = "myData.h264";
    		f = fopen(filename, "wb");
    		if (!f)
    		{
    			TRACE("could not open %s\n", filename);
    			getchar();
    			exit(1);
    		}
    
    		// initialize the SwsContext (BMP pixel data is stored as BGR, so convert BGR24 -> YUV420P)
    		SwsContext * scxt = sws_getContext(c->width,c->height,PIX_FMT_BGR24,c->width,c->height,PIX_FMT_YUV420P,SWS_POINT,NULL,NULL,NULL);
    
    		AVPacket avpkt;
    
    		//AVFrame *pTFrame=new AVFrame
    		for (int i=0;i<250;++i)
    		{
    
    			//AVFrame *m_pYUVFrame = new AVFrame[1];
    
    			int index = (i / 25) % 5;
    			memcpy(rgb_buff,szTxt[index],nDataLen);
    
    			avpicture_fill((AVPicture*)m_pRGBFrame, (uint8_t*)rgb_buff, PIX_FMT_RGB24, nWidth, nHeight);
    
    			// attach the YUV buffer to the YUV frame
    			avpicture_fill((AVPicture*)m_pYUVFrame, (uint8_t*)yuv_buff, PIX_FMT_YUV420P, nWidth, nHeight);
    
    			// flip the RGB image vertically (BMP pixel rows are stored bottom-up)
    			m_pRGBFrame->data[0]  += m_pRGBFrame->linesize[0] * (nHeight - 1);
    			m_pRGBFrame->linesize[0] *= -1;                   
    			m_pRGBFrame->data[1]  += m_pRGBFrame->linesize[1] * (nHeight / 2 - 1);
    			m_pRGBFrame->linesize[1] *= -1;
    			m_pRGBFrame->data[2]  += m_pRGBFrame->linesize[2] * (nHeight / 2 - 1);
    			m_pRGBFrame->linesize[2] *= -1;
    
    
    			// convert RGB to YUV420P
    			sws_scale(scxt,m_pRGBFrame->data,m_pRGBFrame->linesize,0,c->height,m_pYUVFrame->data,m_pYUVFrame->linesize);
    
    			int got_packet_ptr = 0;
    			av_init_packet(&avpkt);
    			avpkt.data = outbuf;
    			avpkt.size = outbuf_size;
    			m_pYUVFrame->pts = i;
    			// avcodec_encode_video2 returns 0 on success; got_packet_ptr says whether a packet was produced
    			u_size = avcodec_encode_video2(c, &avpkt, m_pYUVFrame, &got_packet_ptr);
    			if (u_size == 0 && got_packet_ptr)
    			{
    				fwrite(avpkt.data, 1, avpkt.size, f);
    			}
    		}
    
    		fclose(f); 
    		delete []m_pRGBFrame;
    		delete []m_pYUVFrame;
    		delete []rgb_buff;
    		free(outbuf);
    		avcodec_close(c);
    		av_free(c);
    
    		return 0;
    	}
    
    #ifdef  __cplusplus
    }
    #endif

     Testing with the blog's code exactly as written, the encoder reported errors and the played-back video was always blurry. Increasing outbuf_size fixed both problems.

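    A note on outbuf_size: with this legacy API, avcodec_encode_video2() can also allocate the output packet itself when avpkt.data is left NULL, which avoids guessing a magic buffer size such as 900000. The fragment below is only a sketch of that variant, reusing the names c, m_pYUVFrame and f from the listing above; it is not the blog author's code.

    // Sketch only: let the encoder allocate each packet instead of writing into
    // a fixed 900000-byte buffer (assumes the same legacy FFmpeg API as above).
    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = NULL;   // NULL data + size 0: avcodec_encode_video2 allocates the packet
    avpkt.size = 0;
    
    int got_packet = 0;
    int ret = avcodec_encode_video2(c, &avpkt, m_pYUVFrame, &got_packet);
    if (ret == 0 && got_packet) {
    	fwrite(avpkt.data, 1, avpkt.size, f);
    	av_free_packet(&avpkt);   // release the buffer the encoder allocated
    }
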
    Question: why does the loop run 250 times? If anyone knows, please explain!
    for (int i=0;i<250;++i)
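
    A guess at the answer (it is not stated in the original post): with the 1/25 time base set above, 250 frames is 10 seconds of video, and because index = (i / 25) % 5 each of the five BMPs is shown for one second, so the five-image sequence simply plays through twice. A tiny standalone check of that arithmetic:

    #include <stdio.h>
    
    int main(void)
    {
    	int fps               = 25;    // c->time_base = 1/25 in the encoder
    	int num_bitmaps       = 5;     // 1.bmp .. 5.bmp
    	int frames_per_bitmap = 25;    // index = (i / 25) % 5 in the encode loop
    	int total_frames      = 250;   // the loop bound in question
    
    	printf("clip length       : %d s\n", total_frames / fps);                    // 10 s
    	printf("one pass of 5 bmps: %d s\n", num_bitmaps * frames_per_bitmap / fps); // 5 s
    	printf("sequence repeats  : %d times\n",
    	       total_frames / (num_bitmaps * frames_per_bitmap));                    // 2
    	return 0;
    }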

    Saving the H.264 video back as BMP images; the full code is below:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <windows.h>
    
    #ifdef  __cplusplus
    extern "C" {
    #endif
    
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>
    
    void SaveAsBMP (AVFrame *pFrameRGB, int width, int height, int index, int bpp)
    {
    	char buf[5] = {0};
    	BITMAPFILEHEADER bmpheader;
    	BITMAPINFOHEADER bmpinfo;
    	FILE *fp;
    
    	char filename[20] = "";
    	_itoa (index, buf, 10);
    	strcat (filename, buf);
    	strcat (filename, ".bmp");
    
    	if ( (fp = fopen(filename,"wb+")) == NULL )
    	{
    		printf ("open file failed!
    ");
    		return;
    	}
    
    	bmpheader.bfType = 0x4d42;
    	bmpheader.bfReserved1 = 0;
    	bmpheader.bfReserved2 = 0;
    	bmpheader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
    	bmpheader.bfSize = bmpheader.bfOffBits + width*height*bpp/8;
    
    	bmpinfo.biSize = sizeof(BITMAPINFOHEADER);
    	bmpinfo.biWidth = width;
    	bmpinfo.biHeight = height;
    	bmpinfo.biPlanes = 1;
    	bmpinfo.biBitCount = bpp;
    	bmpinfo.biCompression = BI_RGB;
    	bmpinfo.biSizeImage = (width*bpp+31)/32*4*height;
    	bmpinfo.biXPelsPerMeter = 100;
    	bmpinfo.biYPelsPerMeter = 100;
    	bmpinfo.biClrUsed = 0;
    	bmpinfo.biClrImportant = 0;
    
    	fwrite (&bmpheader, sizeof(bmpheader), 1, fp);
    	fwrite (&bmpinfo, sizeof(bmpinfo), 1, fp);
    	fwrite (pFrameRGB->data[0], width*height*bpp/8, 1, fp);
    
    	fclose(fp);
    }
    
    
    int main (void)
    {
    	unsigned int i = 0, videoStream = -1;
    	AVCodecContext *pCodecCtx;
    	AVFormatContext *pFormatCtx = NULL;
    	AVCodec *pCodec;
    	AVFrame *pFrame, *pFrameRGB;
    	struct SwsContext *pSwsCtx;
    	const char *filename = "myData.h264";
    	AVPacket packet;
    	int frameFinished;
    	int PictureSize;
    	uint8_t *buf;
    
    	av_register_all();
    
    	if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) != 0 ){
    		printf ("av open input file failed!
    ");
    		exit (1);
    	}
    
    	if ( avformat_find_stream_info(pFormatCtx,NULL) < 0 ){
    		printf ("av find stream info failed!
    ");
    		exit (1);
    	}
    
    	for ( i=0; i<pFormatCtx->nb_streams; i++ ){
    		if ( pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO ){
    			videoStream = i;
    			break;
    		}
    	}
    
    	if (videoStream == -1){
    		printf ("find video stream failed!
    ");
    		exit (1);
    	}
    
    	pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    
    	pCodec = avcodec_find_decoder (pCodecCtx->codec_id);
    	if (pCodec == NULL){
    		printf ("avcode find decoder failed!
    ");
    		exit (1);
    	}
    
    	if ( avcodec_open2(pCodecCtx, pCodec, NULL) < 0 ){
    		printf ("avcodec open failed!\n");
    		exit (1);
    	}
    
    	pFrame = avcodec_alloc_frame();
    	pFrameRGB = avcodec_alloc_frame();
    
    	if ( (pFrame == NULL)||(pFrameRGB == NULL) ){
    		printf("avcodec alloc frame failed!
    ");
    		exit (1);
    	}
    
    	PictureSize = avpicture_get_size (PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
    
    	buf = (uint8_t *)av_malloc(PictureSize);
    
    	if ( buf == NULL ){
    		printf( "av malloc failed!
    ");
    		exit(1);
    	}
    
    	avpicture_fill ( (AVPicture *)pFrameRGB, buf, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
    
    	pSwsCtx = sws_getContext (pCodecCtx->width,
    			pCodecCtx->height,
    			pCodecCtx->pix_fmt,
    			pCodecCtx->width,
    			pCodecCtx->height,
    			PIX_FMT_BGR24,
    			SWS_BICUBIC,
    			NULL, NULL, NULL);
    
    	i = 0;
    
    	while(av_read_frame(pFormatCtx, &packet) >= 0){
    		if(packet.stream_index == videoStream){
    			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
    
    			if(frameFinished){ 
    				// flip the image vertically (BMP pixel rows are stored bottom-up)
    				pFrame->data[0] += pFrame->linesize[0] * (pCodecCtx->height - 1);
    				pFrame->linesize[0] *= -1;
    				pFrame->data[1] += pFrame->linesize[1] * (pCodecCtx->height / 2 - 1);
    				pFrame->linesize[1] *= -1;
    				pFrame->data[2] += pFrame->linesize[2] * (pCodecCtx->height / 2 - 1);
    				pFrame->linesize[2] *= -1;
    
    				sws_scale (pSwsCtx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
    
    				SaveAsBMP (pFrameRGB, pCodecCtx->width, pCodecCtx->height, i++, 24);
    			} 
    		}
    		av_free_packet(&packet);
    	}
    
    	// flush any frames still buffered in the decoder
    	while(1){
    		packet.data = NULL;
    		packet.size = 0;
    		avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
    
    		if(frameFinished){ 
    			// flip the image vertically (BMP pixel rows are stored bottom-up)
    			pFrame->data[0] += pFrame->linesize[0] * (pCodecCtx->height - 1);
    			pFrame->linesize[0] *= -1;
    			pFrame->data[1] += pFrame->linesize[1] * (pCodecCtx->height / 2 - 1);
    			pFrame->linesize[1] *= -1;
    			pFrame->data[2] += pFrame->linesize[2] * (pCodecCtx->height / 2 - 1);
    			pFrame->linesize[2] *= -1;
    
    			sws_scale (pSwsCtx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
    
    			SaveAsBMP (pFrameRGB, pCodecCtx->width, pCodecCtx->height, i++, 24);
    		}else{
    			break;
    		}
    
    		av_free_packet(&packet);  
    	}  
    
    	sws_freeContext (pSwsCtx);
    	av_free (pFrame);
    	av_free (pFrameRGB);
    	avcodec_close (pCodecCtx);
    	avformat_close_input (&pFormatCtx);
    
    	return 0;
    }
    
    #ifdef  __cplusplus
    }
    #endif


    Another way to save video frames as image files; see the code:

    /*File : yuv2pic
     *Auth : sjin
     *Date : 20141123
     *Mail : 413977243@qq.com
     */
    
    /*
     * Reference: http://blog.csdn.net/leixiaohua1020/article/details/25346147
     * This program encodes YUV420P pixel data into a single image file (JPEG in the
     * referenced post, PNG here). It is about the simplest FFmpeg encoding example
     * there is, and walking through it shows the basic FFmpeg encoding workflow.
     */
    #include <stdio.h>
    
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>
    
    #define INPUT_FILE_NAME  "yuv420p.yuv"
    #define OUTPUT_FILE_NAME "encode.png"
    #define INPUT_FILE_WIDTH  176
    #define INPUT_FILE_HEIGHT 144
    
    int main(int argc, char* argv[])
    {
    	AVFormatContext* pFormatCtx;
    	AVOutputFormat* fmt;
    	AVStream* video_st;
    	AVCodecContext* pCodecCtx;
    	AVCodec* pCodec;
    
    	uint8_t* picture_buf;
    	AVFrame* picture;
    	int size;
    
    	FILE *in_file = fopen(INPUT_FILE_NAME, "rb");	// source YUV video file
    	if (!in_file){
    		printf("failed to open %s\n", INPUT_FILE_NAME);
    		return -1;
    	}
    	int in_w = INPUT_FILE_WIDTH;
    	int in_h = INPUT_FILE_HEIGHT;					// width and height
    	const char* out_file = OUTPUT_FILE_NAME;		// output file path
    
    	av_register_all();
    #if 0
    	// Method 1: assemble the pieces by hand
    	pFormatCtx = avformat_alloc_context();
    	// guess the output format; encode with MJPEG
    	fmt = av_guess_format("mjpeg", NULL, NULL);
    	pFormatCtx->oformat = fmt;
    	// note: the output path
    	if (avio_open(&pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0){
    		printf("failed to open the output file\n");
    		return -1;
    	}
    #else
    	// Method 2: more automatic
    	// allocate an AVFormatContext whose output format is guessed from out_file
    	avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
    	fmt = pFormatCtx->oformat;
    
    	video_st = avformat_new_stream(pFormatCtx,NULL);
    	if (video_st==NULL){
    		return -1;
    	}
    #endif
    	pCodecCtx = video_st->codec;
    	pCodecCtx->codec_id = fmt->video_codec;
    	pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    	pCodecCtx->pix_fmt = PIX_FMT_YUVJ420P;
    
    	pCodecCtx->width = in_w;  
    	pCodecCtx->height = in_h;
    
    	pCodecCtx->time_base.num = 1;  
    	pCodecCtx->time_base.den = 25;   
    	// dump the output format information
    	av_dump_format(pFormatCtx, 0, out_file, 1);
    
    	pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
    	if (!pCodec){
    		printf("could not find a suitable encoder!\n");
    		return -1;
    	}
    	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0){
    		printf("failed to open the encoder!\n");
    		return -1;
    	}
    	
    	// allocate an AVFrame for the picture to be encoded
    	picture = avcodec_alloc_frame();
    	// even with the frame allocated, we still need a buffer for the raw pixel data;
    	// avpicture_get_size() gives the size of one raw frame, which we allocate manually
    	size = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
    	picture_buf = (uint8_t *)av_malloc(size);
    	if (!picture_buf){
    		return -1;
    	}
    	// set the picture parameters and point the frame's data pointers at the buffer
    	avpicture_fill((AVPicture *)picture, picture_buf, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
    
    	// write the file header
    	avformat_write_header(pFormatCtx,NULL);
    
    	AVPacket pkt;
    	int y_size = pCodecCtx->width * pCodecCtx->height;
    	av_new_packet(&pkt,y_size*3);
    	// read one YUV420P frame
    	if (fread(picture_buf, 1, y_size*3/2, in_file) != (size_t)(y_size*3/2)){
    		printf("error reading the input file\n");
    		return -1;
    	}
    
    	// point the frame's planes into the buffer
    	picture->data[0] = picture_buf;                 // Y (luma)
    	picture->data[1] = picture_buf + y_size;        // U
    	picture->data[2] = picture_buf + y_size*5/4;    // V
    	int got_picture=0;
    	// encode
    	int ret = avcodec_encode_video2(pCodecCtx, &pkt,picture, &got_picture);
    	if (ret < 0){
    		printf("encoding failed!\n");
    		return -1;
    	}
    	if (got_picture==1){
    		pkt.stream_index = video_st->index;
    		ret = av_write_frame(pFormatCtx, &pkt);
    	}
    
    	av_free_packet(&pkt);
    	// write the file trailer
    	av_write_trailer(pFormatCtx);
    
    	printf("encoding succeeded!\n");
    
    	if (video_st){
    		avcodec_close(video_st->codec);
    		av_free(picture);
    		av_free(picture_buf);
    	}
    	
    	avio_close(pFormatCtx->pb);
    	avformat_free_context(pFormatCtx);
    
    	fclose(in_file);
    
    	return 0;
    }
    

    Below is a Makefile that works well for building these examples:
    # use pkg-config for getting CFLAGS and LDLIBS
    FFMPEG_LIBS=    libavdevice                        \
                    libavformat                        \
                    libavfilter                        \
                    libavcodec                         \
                    libswresample                      \
                    libswscale                         \
                    libavutil
    
    CFLAGS += -Wall -O2 -g
    CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
    LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
    
    EXAMPLES=  yuv2pic
    OBJS=$(addsuffix .o,$(EXAMPLES))
    
    # the following examples make explicit use of the math library
    LDLIBS += -lx264 -m32 -pthread -lm -ldl
    
    .PHONY: all clean
    
    all: $(OBJS) $(EXAMPLES)
    
    clean:
    	rm $(EXAMPLES) $(OBJS)
    

