• audio


    // media.cpp : Defines the entry point for the console application.
    //
    
    https://wenku.baidu.com/view/e910c474c5da50e2524d7fb4.html
    
    https://blog.csdn.net/leixiaohua1020/article/details/10528443
    
    
    
    The key reference is the FFmpeg example examples/demuxing_decoding.c.
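
    For orientation, the core of that example boils down to roughly the sketch below. This is a condensed, illustrative version only, using the same legacy decode API as the program that follows; the file name and variable names here are made up, and the real example also decodes audio and writes the raw frames to output files.

        // Condensed sketch of the demuxing_decoding.c flow (legacy decode API).
        // Open the container and probe the streams.
        AVFormatContext *fmt = NULL;
        avformat_open_input(&fmt, "input.mkv", NULL, NULL);
        avformat_find_stream_info(fmt, NULL);

        // Pick the video stream and open its decoder.
        int vstream = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
        AVCodecContext *vctx = fmt->streams[vstream]->codec;
        avcodec_open2(vctx, avcodec_find_decoder(vctx->codec_id), NULL);

        // Demux packets and decode the video ones into frames.
        AVFrame *frame = av_frame_alloc();
        AVPacket pkt;
        while (av_read_frame(fmt, &pkt) >= 0) {
            if (pkt.stream_index == vstream) {
                int got = 0;
                avcodec_decode_video2(vctx, frame, &got, &pkt);
                if (got) { /* use frame->data / frame->linesize here */ }
            }
            av_free_packet(&pkt);
        }

        av_frame_free(&frame);
        avcodec_close(vctx);
        avformat_close_input(&fmt);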
    
    #include "stdafx.h"
    
    /*
    int _tmain(int argc, _TCHAR* argv[])
    {
        return 0;
    }
    */
    #include "stdafx.h"
    extern "C"
    {
    #include <stdio.h>
    
    #include "libavformat/avformat.h"
    #include "libavutil/dict.h"
         
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    
    #include <libswscale/swscale.h>
    }
    #pragma comment(lib,"lib/avcodec.lib")
    #pragma comment(lib,"lib/avformat.lib")
    #pragma comment(lib,"lib/avutil")
    //#pragma comment(lib,"lib/SDLmain.lib")
    #pragma comment(lib,"lib/avdevice.lib")
    #pragma comment(lib,"lib/avfilter.lib")
    #pragma comment(lib,"lib/postproc.lib")
    
    #pragma comment(lib,"lib/swresample.lib")
    #pragma comment(lib,"lib/swscale.lib")
    
    #pragma comment(lib, "sdl2.lib")
    #include <iostream>
    #include <cstdlib>   // rand() used by randomInt()
    #include <cstring>   // memset()
    
    
    
     
    #include <SDL2/SDL.h>
    #include <SDL2/SDL_thread.h>
     
    #ifdef __MINGW32__
    #undef main 
    #endif
     
    #include <stdio.h>
     
    int
    randomInt(int min, int max)
    {
        return min + rand() % (max - min + 1);
    }
     #define MAX_AUDIO_FRAME_SIZE 192000
    SDL_AudioSpec wanted_spec, spec;
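
    // audio_decode_frame() below refers to packet_queue_get() and an "audioq"
    // that this snippet never defines (the call is commented out, so the audio
    // path just returns -1 once a packet is drained).  For reference, here is a
    // minimal sketch of the SDL-mutex-protected packet queue used by the
    // dranger-style tutorial code this program follows; treat it as an assumed
    // reference implementation, not something from the original post.
    typedef struct PacketQueue {
        AVPacketList *first_pkt, *last_pkt;
        int nb_packets;
        int size;
        SDL_mutex *mutex;
        SDL_cond  *cond;
    } PacketQueue;

    PacketQueue audioq;

    void packet_queue_init(PacketQueue *q) {
        memset(q, 0, sizeof(PacketQueue));
        q->mutex = SDL_CreateMutex();
        q->cond  = SDL_CreateCond();
    }

    // Called from the demux loop: append a packet and wake up the reader.
    int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
        AVPacketList *pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
        if (!pkt1)
            return -1;
        pkt1->pkt = *pkt;
        pkt1->next = NULL;

        SDL_LockMutex(q->mutex);
        if (!q->last_pkt)
            q->first_pkt = pkt1;
        else
            q->last_pkt->next = pkt1;
        q->last_pkt = pkt1;
        q->nb_packets++;
        q->size += pkt1->pkt.size;
        SDL_CondSignal(q->cond);
        SDL_UnlockMutex(q->mutex);
        return 0;
    }

    // Called from the audio thread: pop a packet, optionally blocking until
    // one is available.  Returns 1 on success, 0 if non-blocking and empty.
    int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
        AVPacketList *pkt1;
        int ret;

        SDL_LockMutex(q->mutex);
        for (;;) {
            pkt1 = q->first_pkt;
            if (pkt1) {
                q->first_pkt = pkt1->next;
                if (!q->first_pkt)
                    q->last_pkt = NULL;
                q->nb_packets--;
                q->size -= pkt1->pkt.size;
                *pkt = pkt1->pkt;
                av_free(pkt1);
                ret = 1;
                break;
            } else if (!block) {
                ret = 0;
                break;
            } else {
                SDL_CondWait(q->cond, q->mutex);
            }
        }
        SDL_UnlockMutex(q->mutex);
        return ret;
    }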
    
    int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
        static AVPacket pkt;
        static uint8_t *audio_pkt_data = NULL;
        static int audio_pkt_size = 0;
        static AVFrame frame;
        
        int len1, data_size = 0;
        
        for (;;) {
            while(audio_pkt_size > 0) {
                int got_frame = 0;
                len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
                if (len1 < 0) {
                    // if error, skip frame.
                    audio_pkt_size = 0;
                    break;
                }
                audio_pkt_data += len1;
                audio_pkt_size -= len1;
                if (got_frame) {
                    data_size = av_samples_get_buffer_size(NULL, aCodecCtx->channels, frame.nb_samples, aCodecCtx->sample_fmt, 1);
                    memcpy(audio_buf, frame.data[0], data_size);
                }
                if (data_size <= 0) {
                    // No data yet, get more frames.
                    continue;
                }
                // We have data, return it and come back for more later.
                return data_size;
            }
            if (pkt.data) {
                av_packet_unref(&pkt);
            }
            
    
            
            // With the packet queue disabled, this block always returns -1 once
            // the current packet is drained; packet_queue_get() would normally
            // block here until the demux loop pushes the next audio packet.
            //if (packet_queue_get(&audioq, &pkt, 1) < 0)
            {
                return -1;
            }
            audio_pkt_data = pkt.data;
            audio_pkt_size = pkt.size;
        }
    }
    
    void audio_callback(void *userdata, Uint8 *stream, int len) {
        AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
        int len1, audio_size;
        
        static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
        static unsigned int audio_buf_size = 0;
        static unsigned int audio_buf_index = 0;
        
        while (len > 0) {
            if (audio_buf_index >= audio_buf_size) {
                // We have already sent all our data; get more.
                audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf)); // pass the full buffer capacity
                if (audio_size < 0) {
                    // If error, output silence.
                    audio_buf_size = 1024; // arbitrary?
                    memset(audio_buf, 0, audio_buf_size);
                } else {
                    audio_buf_size = audio_size;
                }
                audio_buf_index = 0;
            }
            len1 = audio_buf_size - audio_buf_index;
            if (len1 > len) {
                len1 = len;
            }
            memcpy(stream, (uint8_t *) audio_buf + audio_buf_index, len1);
            len -= len1;
            stream += len1;
            audio_buf_index += len1;
        }
    }
    
    int _tmain(int argc, char *argv[]) {
        AVFormatContext *pFormatCtx = NULL;
        int             i, videoStream,audioStream;
        AVCodecContext  *pCodecCtx = NULL,*pCodecAudioCtx = NULL;
        AVCodec         *pCodec = NULL,*pCodecAudio = NULL;
        AVFrame         *pFrame = NULL;
        AVPacket        packet;
        int             frameFinished;
        //float           aspect_ratio;
        
        AVDictionary    *optionsDict = NULL ,*audioOptionsDict=NULL;
        struct SwsContext *sws_ctx = NULL;
        //SDL_CreateTexture();
        SDL_Texture    *bmp = NULL;
        SDL_Window     *screen = NULL;
        SDL_Rect        rect;
        SDL_Event       event;
        /*
        if(argc < 2) {
            fprintf(stderr, "Usage: test <file>
    ");
            exit(1);
        }*/
        // Register all formats and codecs
        av_register_all();
        
        if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
            fprintf(stderr, "Could not initialize SDL - %s
    ", SDL_GetError());
            exit(1);
        }
        
        // Open video file
        if(avformat_open_input(&pFormatCtx,"D://shitu.mkv"/* argv[1]"D://3s.mp4"*/, NULL, NULL)!=0)
            return -1; // Couldn't open file
        
        if(avformat_find_stream_info(pFormatCtx, NULL)<0)
            return -1; // Couldn't find stream information
        
        // Dump information about file onto standard error
        av_dump_format(pFormatCtx, 0, "D://shitu.mkv", 0); // same file as opened above
        
        // Find the first video stream and the first audio stream
        videoStream=-1;
        audioStream=-1;
        for(i=0; i<pFormatCtx->nb_streams; i++) {
            if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && videoStream<0) {
                videoStream=i;
            }
            else if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && audioStream<0) {
                audioStream=i;
            }
        }
        if(videoStream==-1||audioStream==-1)
            return -1; // Didn't find both a video and an audio stream
        
        // Get a pointer to the codec context for the video stream
        pCodecCtx=pFormatCtx->streams[videoStream]->codec;
        pCodecAudioCtx=pFormatCtx->streams[audioStream]->codec;
        // Find the decoder for the video stream
        pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
        if(pCodec==NULL) {
            fprintf(stderr, "Unsupported codec!
    ");
            return -1; // Codec not found
        }
        
        pCodecAudio=avcodec_find_decoder(pCodecAudioCtx->codec_id);
        if(pCodecAudio==NULL) {
            fprintf(stderr, "Unsupported audio codec!
    ");
            return -1; // Codec not found
        }
        
        // Open codec
        if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
            return -1; // Could not open codec
        
        //--
        // Describe the audio output we want and hand SDL the callback that will
        // pull decoded samples; wanted_spec must be filled in before SDL_OpenAudio.
        wanted_spec.freq = pCodecAudioCtx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = pCodecAudioCtx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = 1024;
        wanted_spec.callback = audio_callback;
        wanted_spec.userdata = pCodecAudioCtx;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        avcodec_open2(pCodecAudioCtx, pCodecAudio, &audioOptionsDict);
        //--
        pFrame = av_frame_alloc();
        AVFrame* pFrameYUV = av_frame_alloc();
        if (pFrameYUV == NULL)
            return -1;
        //--
        AVFrame* pAudioFrame = av_frame_alloc();
        //--
        screen = SDL_CreateWindow("My Game Window",
                                  SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                                  pCodecCtx->width, pCodecCtx->height,
                                  SDL_WINDOW_FULLSCREEN | SDL_WINDOW_OPENGL);
        SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, 0);
        if (!screen) {
            fprintf(stderr, "SDL: could not set video mode - exiting\n");
            exit(1);
        }

        bmp = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12,
                                SDL_TEXTUREACCESS_STREAMING,
                                pCodecCtx->width, pCodecCtx->height);

        sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                 pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
                                 SWS_BILINEAR, NULL, NULL, NULL);

        int numBytes = avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
        uint8_t* buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
        avpicture_fill((AVPicture *)pFrameYUV, buffer, AV_PIX_FMT_YUV420P,
                       pCodecCtx->width, pCodecCtx->height);

        i = 0;
        rect.x = 0;
        rect.y = 0;
        rect.w = pCodecCtx->width;
        rect.h = pCodecCtx->height;

        while (av_read_frame(pFormatCtx, &packet) >= 0) {
            // Is this a packet from the video stream?
            if (packet.stream_index == videoStream) {
                // Decode video frame
                avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
                // Did we get a video frame?
                if (frameFinished) {
                    sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize,
                              0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                    // The pitch is the number of bytes one row of YUV data occupies
                    SDL_UpdateTexture(bmp, &rect, pFrameYUV->data[0], pFrameYUV->linesize[0]);
                    SDL_RenderClear(renderer);
                    SDL_RenderCopy(renderer, bmp, &rect, &rect);
                    SDL_RenderPresent(renderer);
                }
                SDL_Delay(50);
                //Sleep(500);
            } else if (packet.stream_index == audioStream) {
                // Audio packets are not handled yet; with a packet queue they
                // would be pushed here for audio_callback() to consume.
            }
            // Free the packet that was allocated by av_read_frame
            av_free_packet(&packet);

            SDL_PollEvent(&event);
            switch (event.type) {
            case SDL_QUIT:
                SDL_Quit();
                exit(0);
                break;
            default:
                break;
            }
        }

        SDL_DestroyTexture(bmp);

        // Free the YUV frames
        av_free(pFrame);
        av_free(pFrameYUV);

        // Close the codec
        avcodec_close(pCodecCtx);

        // Close the video file
        avformat_close_input(&pFormatCtx);

        return 0;
    }

    https://www.cnblogs.com/lgh1992314/p/5834639.html

    (Note to self: rewatch the video without distractions; getting scattered slows things down. One thing at a time, one topic per session.)

  • Original post: https://www.cnblogs.com/cnchengv/p/9833854.html