#define MAX_PLANES 3
typedef unsigned char BYTE;
/*
 * Decoded picture as produced by the ffmpeg-based decoder.
 * data[i] / iLineSize[i] hold the plane pointer and its stride in bytes;
 * the stride may include right-hand padding, so iLineSize[0] >= iWidth.
 * Index 3 is reserved for an alpha plane and is currently unused.
 */
typedef struct _DVDVideoPicture
{
BYTE* data[4]; // [4] = alpha channel, currently not used
int iLineSize[4]; // [4] = alpha channel, currently not used
int iWidth; // visible picture width in pixels (excludes stride padding); read by copyPictureFrom:
int iHeight; // visible picture height in pixels; read by copyPictureFrom:
}DVDVideoPicture;
YV12Image 这个结构体用于保存抽取出来的 Y、U、V 数据:
/*
 * Tightly-packed (padding-free) copy of one decoded YV12 frame.
 * Filled by copyPictureFrom: and consumed by the texture upload path.
 */
typedef struct YV12Image
{
BYTE *planeData[MAX_PLANES]; // Y/U/V pixel buffers, allocated in configure:
int planeSize[MAX_PLANES]; // byte size of each plane (stride * plane height)
unsigned stride[MAX_PLANES]; // bytes per row; equals the plane width here (no padding)
unsigned width; // luma width in pixels
unsigned height; // luma height in pixels
unsigned flags;
unsigned cshift_x; /* this is the chroma shift used */
unsigned cshift_y; // chroma planes are (width >> cshift_x) x (height >> cshift_y)
}YV12Image;
可能有人要问了:DVDVideoPicture 不是已经保存了 Y、U、V 数据了吗,干嘛又定义一个 YV12Image 来拷贝一份呢?理由很简单:ffmpeg 解码得出的图像右侧可能会有一段 padding 区域,所以 DVDVideoPicture 的 iLineSize 并不表示图像的宽度,AVCodecContext 的 width 才是图像的宽。对应 Y 分量有 iLineSize[0] = pContext->width + padding,其中 pContext 是 AVCodecContext 类型,U、V 分量类似。因此 YV12Image 就是用来保存从 DVDVideoPicture 中按 pContext->width 抽取出来(即去掉 padding)的图像数据。下面看具体的实现:
先定义如下的结构体,用于保存 Y、U、V 分量各自对应的 texture:
/* One GL texture per YUV plane; three of these make up a frame. */
typedef struct _YUVPLANE
{
GLuint ID;// GL texture object name for this plane
unsigned texwidth; // texture width in texels (chroma planes are half-size)
unsigned texheight; // texture height in texels
}YUVPLANE;
// Fixed-size set of per-plane textures: [0]=Y, [1]=U, [2]=V.
typedef YUVPLANE YUVPLANES[MAX_PLANES];
/* Pairs the CPU-side pixel data with the GL textures it is uploaded to. */
typedef struct YUVBUFFER
{
YV12Image image;// source YUV pixel data (padding removed)
YUVPLANES planes;// the GL textures the planes are uploaded into
}YUVBUFFER;
//////////////////////////////////////////////////////////////////
// Shared frame buffer used by configure:, copyPictureFrom: and the
// texture upload. NOTE(review): shown here as a global; in the real
// class this is presumably an instance variable — confirm.
YUVBUFFER _yuvbuffer;
在解出一帧的时候,调用如下函数初始化 _yuvbuffer:
// One-time setup of _yuvbuffer for a 4:2:0 (YV12) frame of the given size.
// Records the source dimensions, then — on the first call only — computes
// per-plane strides/sizes, allocates the plane buffers, and records the
// texture dimensions for each plane.
// NOTE: planeData is allocated with new[] and never freed here; a matching
// teardown must delete[] the planes before reconfiguring.
- (void)configure:(unsigned int)width height:(unsigned int)height
{
_sourceWidth = width;
_sourceHeight = height;
if (!_bConfiged)
{
YV12Image &im = _yuvbuffer.image; // YUV pixel-data descriptor
YUVPLANES &planes = _yuvbuffer.planes; // YUV textures
im.width = _sourceWidth;
im.height = _sourceHeight;
// 4:2:0 chroma subsampling: chroma is halved in both directions.
im.cshift_x = 1;
im.cshift_y = 1;
for (int i = 0; i < MAX_PLANES; i++)
{
// Plane 0 is luma (full resolution); planes 1/2 are chroma,
// shrunk by the chroma shifts set above.
const unsigned xShift = (i == 0) ? 0 : im.cshift_x;
const unsigned yShift = (i == 0) ? 0 : im.cshift_y;
im.stride[i] = im.width >> xShift;
im.planeSize[i] = im.stride[i] * (im.height >> yShift);
im.planeData[i] = new BYTE[im.planeSize[i]];
planes[i].texwidth = im.width >> xShift;
planes[i].texheight = im.height >> yShift;
}
_bConfiged = YES;
}
}
如果对这段代码有困惑,需要先了解一下 YUV 的数据格式。
接下来就是 Y、U、V 数据的抽取:
// Copies the three planes of a decoded picture into _yuvbuffer.image,
// stripping any right-hand stride padding, then uploads them as textures.
// For each plane: if the source stride already equals both the visible
// width and the destination stride, one bulk memcpy suffices; otherwise
// copy row by row, advancing source by iLineSize (which includes padding)
// and destination by the packed stride.
// (The original spelled this logic out three times verbatim — once per
// plane — and the V branch silently reused the U plane's w/h; a single
// loop expresses the same behavior once.)
- (void)copyPictureFrom:(DVDVideoPicture *)pic
{
YV12Image &img = _yuvbuffer.image;
for (int i = 0; i < MAX_PLANES; i++)
{
// Plane 0 = full-size luma; planes 1/2 = half-size chroma (4:2:0).
const int shift = (i == 0) ? 0 : 1;
const int w = pic->iWidth >> shift;
const int h = pic->iHeight >> shift;
BYTE *s = pic->data[i];
BYTE *d = img.planeData[i];
if (w == pic->iLineSize[i] && (unsigned int)pic->iLineSize[i] == img.stride[i])
{
// No padding anywhere: the whole plane is contiguous.
fast_memcpy(d, s, w * h);
}
else
{
// Copy only the visible w bytes of each row, skipping padding.
for (int y = 0; y < h; y++)
{
fast_memcpy(d, s, w);
s += pic->iLineSize[i]; // source stride includes padding
d += img.stride[i];
}
}
}
@synchronized(glContext) {
if (glContext) {
[EAGLContext setCurrentContext:glContext];
[self uploadYV12Texture];
glFlush();
[EAGLContext setCurrentContext:nil];
}
}
}
2. 以上工作完成了 Y、U、V 数据的抽取,接下来就是把数据发送给 GL ES,用到了 OpenGL ES 的纹理加载,感兴趣的可以去查找下纹理加载方法,这里不再赘述。需要注意的是 Y、U、V 对应三个不同的 texture。
3.GLSL处理YUV-->RGB的转换(重点)
3.1先看顶点着色器process.vsh的实现:
// Vertex shader: pass-through. Forwards the clip-space position unchanged
// and hands the texture coordinate to the fragment stage.
attribute vec4 position;
attribute mediump vec4 textureCoordinate;
varying mediump vec2 coordinate;
void main()
{
gl_Position = position;
coordinate = textureCoordinate.xy;
}
// Fragment shader: samples the three single-channel YUV planes and
// converts to RGB. U/V are stored biased by +0.5, hence the subtraction.
precision mediump float;
uniform sampler2D SamplerY; // luma plane (value read from .r)
uniform sampler2D SamplerU; // chroma U plane
uniform sampler2D SamplerV; // chroma V plane
// NOTE(review): declared mediump in the vertex shader; GLSL ES allows the
// varying precision to differ across stages, but matching them is cleaner.
varying highp vec2 coordinate;
void main()
{
mediump vec3 yuv;
lowp vec3 rgb;
yuv.x = texture2D(SamplerY, coordinate).r;
yuv.y = texture2D(SamplerU, coordinate).r - 0.5;
yuv.z = texture2D(SamplerV, coordinate).r - 0.5;
// mat3 constructor is column-major, so this computes (BT.709 coefficients):
//   R = Y                       + 1.28033 * V
//   G = Y - 0.21482 * U - 0.38059 * V
//   B = Y + 2.12798 * U
rgb = mat3( 1, 1, 1,
0, -.21482, 2.12798,
1.28033, -.38059, 0) * yuv;
gl_FragColor = vec4(rgb, 1);
}
4. 纹理贴图(不再赘述)。