The codebook algorithm uses quantization to build a background model from the time series of each pixel's values. It can cope with pixels that change drastically, with moving objects in the scene, and with fairly complex backgrounds. The algorithm maintains one codebook per pixel, and each codebook holds one or more codewords. A codeword records the learning thresholds for the background, the time the pixel was last updated, its access frequency, and so on. From this information the behavior of each pixel over time can be tracked, which yields the background model of the video.
1. A simple Codebook implementation in OpenCV
The CodeBook algorithm builds a CodeBook (CB) structure for every pixel of the current image, and each CodeBook in turn consists of one or more CodeWords (CW). CB and CW take the following form:
CB = {CW1, CW2, …, CWn, t}
CW = {IHigh, ILow, max, min, t_last, stale}
Here n is the number of CWs contained in a CB: when n is small the model degenerates to a simple background, while a larger n can model a complex background. t is the number of times the CB has been updated. A CW is a 6-tuple in which IHigh and ILow are the upper and lower learning bounds used during updates, and max and min record the largest and smallest values seen for this pixel. The last update time t_last and the staleness stale (how long the CW has gone unaccessed) are used to delete rarely used codewords.
Let I(x,y) be a pixel of the current training image I, and let Bounds be the threshold that governs how far the learning bounds may grow. The CB for that pixel is updated as follows (a single-channel sketch of these steps is given right after the list):
(1) Increment the CB's access count t by 1.
(2) Scan every CW in the CB; if some CW has bounds IHigh and ILow such that ILow ≤ I(x,y) ≤ IHigh, go to (4).
(3) Create a new codeword CWnew and add it to the CB, setting both its max and min to I(x,y), IHigh ← I(x,y) + Bounds and ILow ← I(x,y) − Bounds; then go to (6).
(4) Update the matching codeword's t_last; if I(x,y) is greater than the codeword's max, set max ← I(x,y); if I(x,y) is smaller than its min, set min ← I(x,y).
(5) Update the codeword's learning bounds so the model adapts better to complex backgrounds: if IHigh < I(x,y) + Bounds, increase IHigh by 1; if ILow > I(x,y) − Bounds, decrease ILow by 1.
(6) Update the stale value of every CW in the CB.
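The six steps condense into code as follows. This is only a minimal single-channel sketch for illustration, not the author's implementation; the names (CodeWord, CodeBook, update, bounds) are chosen here for readability, and the full three-channel OpenCV version appears later in this post.

#include <vector>

// Minimal single-channel sketch of the update procedure above.
struct CodeWord {
    int IHigh, ILow;    // learning bounds
    int max, min;       // observed value range
    int t_last, stale;  // last update time and longest idle run
};

struct CodeBook {
    std::vector<CodeWord> words;
    int t = 0;          // update count
};

void update(CodeBook& cb, int pixel, int bounds)
{
    cb.t += 1;                                        // step (1)
    int matched = -1;
    for (size_t i = 0; i < cb.words.size(); i++)      // step (2)
        if (cb.words[i].ILow <= pixel && pixel <= cb.words[i].IHigh) {
            matched = (int)i;
            break;
        }
    if (matched < 0) {                                // step (3)
        CodeWord w = { pixel + bounds, pixel - bounds, pixel, pixel, cb.t, 0 };
        cb.words.push_back(w);
    } else {                                          // steps (4) and (5)
        CodeWord& w = cb.words[matched];
        w.t_last = cb.t;
        if (pixel > w.max) w.max = pixel;
        if (pixel < w.min) w.min = pixel;
        if (w.IHigh < pixel + bounds) w.IHigh += 1;
        if (w.ILow  > pixel - bounds) w.ILow  -= 1;
    }
    for (auto& w : cb.words) {                        // step (6)
        int idle = cb.t - w.t_last;
        if (idle > w.stale) w.stale = idle;
    }
}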
Detecting moving objects with a trained CB is simple. Let minMod and maxMod be the margins that widen the background range on each side. For a pixel I(x,y) of the image under test, scan every codeword CW in the pixel's background model CB: if there is a CW such that I(x,y) < max + maxMod and I(x,y) > min − minMod, the pixel is classified as background; otherwise it is classified as foreground.
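Reusing the illustrative CodeWord/CodeBook structures from the sketch above, the detection rule reads almost verbatim:

// Single-channel sketch of the detection rule: background if the pixel
// falls inside any codeword's widened range [min - minMod, max + maxMod].
bool is_background(const CodeBook& cb, int pixel, int minMod, int maxMod)
{
    for (const auto& w : cb.words)
        if (pixel > w.min - minMod && pixel < w.max + maxMod)
            return true;   // covered by a codeword => background
    return false;          // no codeword covers it => foreground
}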
When using the CodeBook for motion detection in practice, besides updating the CB at regular intervals, the CB also needs temporal filtering to remove rarely accessed CWs: check the stale value of every CW, and if it exceeds a threshold (usually set to half of the total number of updates), remove that CW.
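The temporal filter, again as a single-channel sketch on the same illustrative structures:

#include <algorithm>  // std::remove_if

// Drop codewords whose stale count exceeds half the total update count.
void clear_stale(CodeBook& cb)
{
    int staleThresh = cb.t / 2;
    cb.words.erase(
        std::remove_if(cb.words.begin(), cb.words.end(),
                       [&](const CodeWord& w) { return w.stale > staleThresh; }),
        cb.words.end());
}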
Implementation with OpenCV — a complete codebook example:
#include <cv.h>
#include <highgui.h>

int CVCONTOUR_APPROX_LEVEL = 2;
int CVCLOSE_ITR = 1;

#define CV_CVX_WHITE CV_RGB(0xff,0xff,0xff)
#define CV_CVX_BLACK CV_RGB(0x00,0x00,0x00)
#define CHANNELS 3

typedef struct ce {
    uchar learnHigh[CHANNELS]; // high side threshold for learning
    uchar learnLow[CHANNELS];  // low side threshold for learning
    uchar max[CHANNELS];       // high side of box boundary
    uchar min[CHANNELS];       // low side of box boundary
    int t_last_update;         // allows us to kill stale entries
    int stale;                 // max negative run (longest period of inactivity)
} code_element;                // codeword structure

typedef struct code_book {
    code_element **cb;         // pointers to the codewords
    int numEntries;            // number of codewords in this codebook
    int t;                     // count every access
} codeBook;

codeBook* TcodeBook;           // one codebook per pixel

//////////////////////////////////////////////////////////////
// int update_codebook(uchar *p, codeBook &c, unsigned *cbBounds, int numChannels)
// Updates the codebook entry with a new data point
// p           pointer to a YUV or HSI pixel
// c           codebook for this pixel
// cbBounds    learning bounds for the codebook (must have numChannels entries)
// numChannels number of color channels we're learning
// Returns the codebook index
int update_codebook(uchar* p, codeBook& c, unsigned* cbBounds, int numChannels)
{
    c.t += 1;                  // step (1): record this learning event
    int i = 0;
    int high[3], low[3];       // signed, so the low bound can be clamped at 0
    int n;
    for (n = 0; n < numChannels; n++) {
        high[n] = *(p+n) + (int)*(cbBounds+n);
        if (high[n] > 255) high[n] = 255;
        low[n] = *(p+n) - (int)*(cbBounds+n);
        if (low[n] < 0) low[n] = 0;
    }

    int matchChannel;
    // SEE IF THIS FITS AN EXISTING CODEWORD
    for (i = 0; i < c.numEntries; i++) {
        matchChannel = 0;
        for (n = 0; n < numChannels; n++) {
            if ((c.cb[i]->learnLow[n] <= *(p+n)) &&   // found an entry for this channel
                (*(p+n) <= c.cb[i]->learnHigh[n])) {
                matchChannel++;
            }
        }
        if (matchChannel == numChannels) {  // an entry was found
            c.cb[i]->t_last_update = c.t;
            // adjust this codeword's observed range
            for (n = 0; n < numChannels; n++) {
                if (c.cb[i]->max[n] < *(p+n)) {
                    c.cb[i]->max[n] = *(p+n);
                } else if (c.cb[i]->min[n] > *(p+n)) {
                    c.cb[i]->min[n] = *(p+n);
                }
            }
            break;
        }
    }

    // OVERHEAD TO TRACK POTENTIAL STALE ENTRIES
    for (int s = 0; s < c.numEntries; s++) {
        // track which codebook entries are going stale
        int negRun = c.t - c.cb[s]->t_last_update;
        if (c.cb[s]->stale < negRun)
            c.cb[s]->stale = negRun;
    }

    // ENTER A NEW CODEWORD IF NEEDED
    if (i == c.numEntries) {  // no existing codeword found, make one
        code_element **foo = new code_element* [c.numEntries+1];
        for (int ii = 0; ii < c.numEntries; ii++) {
            foo[ii] = c.cb[ii];
        }
        foo[c.numEntries] = new code_element;
        if (c.numEntries) delete [] c.cb;
        c.cb = foo;
        for (n = 0; n < numChannels; n++) {
            c.cb[c.numEntries]->learnHigh[n] = high[n];
            c.cb[c.numEntries]->learnLow[n] = low[n];
            c.cb[c.numEntries]->max[n] = *(p+n);
            c.cb[c.numEntries]->min[n] = *(p+n);
        }
        c.cb[c.numEntries]->t_last_update = c.t;
        c.cb[c.numEntries]->stale = 0;
        c.numEntries += 1;
    }

    // SLOWLY ADJUST LEARNING BOUNDS
    for (n = 0; n < numChannels; n++) {
        if (c.cb[i]->learnHigh[n] < high[n]) c.cb[i]->learnHigh[n] += 1;
        if (c.cb[i]->learnLow[n] > low[n])   c.cb[i]->learnLow[n] -= 1;
    }
    return i;
}

///////////////////////////////////////////////////////////////////
// int clear_stale_entries(codeBook &c)
// After learning for some period of time, call this periodically
// to clear out stale codebook entries.
// c           codebook to clean up
// Returns the number of entries cleared
int clear_stale_entries(codeBook &c)
{
    int staleThresh = c.t >> 1;  // half of the total update count
    int *keep = new int [c.numEntries];
    int keepCnt = 0;

    // SEE WHICH CODEBOOK ENTRIES ARE TOO STALE
    for (int i = 0; i < c.numEntries; i++) {
        if (c.cb[i]->stale > staleThresh)
            keep[i] = 0;  // mark for destruction
        else {
            keep[i] = 1;  // mark to keep
            keepCnt += 1;
        }
    }

    // KEEP ONLY THE GOOD
    c.t = 0;  // full reset on stale tracking
    code_element **foo = new code_element* [keepCnt];
    int k = 0;
    for (int ii = 0; ii < c.numEntries; ii++) {
        if (keep[ii]) {
            foo[k] = c.cb[ii];
            foo[k]->t_last_update = 0;  // refresh these entries for the next clearStale
            foo[k]->stale = 0;
            k++;
        } else {
            delete c.cb[ii];            // free the discarded codeword
        }
    }

    // CLEAN UP
    delete [] keep;
    delete [] c.cb;
    c.cb = foo;
    int numCleared = c.numEntries - keepCnt;
    c.numEntries = keepCnt;
    return numCleared;
}

////////////////////////////////////////////////////////////
// uchar background_diff(uchar *p, codeBook &c, int numChannels,
//                       int *minMod, int *maxMod)
// Given a pixel and a codebook, determine whether the pixel is
// covered by the codebook.
// p           pixel pointer (interleaved channels)
// c           codebook reference
// numChannels number of channels we are testing
// maxMod      added (possibly negative) to max when testing for foreground
// minMod      subtracted (possibly negative) from min when testing for foreground
// NOTES: minMod and maxMod must have length numChannels,
// e.g. 3 channels => minMod[3], maxMod[3]; there is one min and
// one max threshold per channel.
// Returns 0 => background, 255 => foreground
uchar background_diff(uchar* p, codeBook& c, int numChannels, int* minMod, int* maxMod)
{
    int i = 0;
    int matchChannel;
    // SEE IF THIS FITS AN EXISTING CODEWORD
    for (i = 0; i < c.numEntries; i++) {
        matchChannel = 0;
        for (int n = 0; n < numChannels; n++) {
            if ((c.cb[i]->min[n] - minMod[n] <= *(p+n)) &&
                (*(p+n) <= c.cb[i]->max[n] + maxMod[n])) {
                matchChannel++;  // found an entry for this channel
            } else {
                break;
            }
        }
        if (matchChannel == numChannels) {
            break;  // found an entry that matched all channels
        }
    }
    if (i >= c.numEntries) return 255;
    return 0;
}

// void connected_Components(IplImage *mask, int poly1_hull0, float perimScale,
//                           int *num, CvRect *bbs, CvPoint *centers)
// Cleans up the raw foreground mask: morphological open/close, removal of
// small blobs, smoothing of the remaining contours, and optionally centers
// and bounding boxes of up to *num blobs.
void connected_Components(IplImage *mask, int poly1_hull0, float perimScale,
                          int *num, CvRect *bbs, CvPoint *centers)
{
    static CvMemStorage* mem_storage = NULL;
    static CvSeq* contours = NULL;

    // CLEAN UP RAW MASK
    cvMorphologyEx(mask, mask, NULL, NULL, CV_MOP_OPEN, 1);
    cvMorphologyEx(mask, mask, NULL, NULL, CV_MOP_CLOSE, 2);

    // FIND CONTOURS AROUND ONLY BIGGER REGIONS
    if (mem_storage == NULL)
        mem_storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(mem_storage);

    CvContourScanner scanner = cvStartFindContours(mask, mem_storage,
        sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    CvSeq* c;
    int numCont = 0;
    while ((c = cvFindNextContour(scanner)) != NULL) {
        double len = cvContourPerimeter(c);
        // perimeter length threshold
        double q = (mask->height + mask->width) / perimScale;
        if (len < q) {
            // get rid of the blob if its perimeter is too small
            cvSubstituteContour(scanner, NULL);
        } else {
            // smooth its edges if it is large enough
            CvSeq* c_new;
            if (poly1_hull0)  // polygonal approximation of the segmentation
                c_new = cvApproxPoly(c, sizeof(CvContour), mem_storage,
                                     CV_POLY_APPROX_DP, CVCONTOUR_APPROX_LEVEL, 0);
            else              // convex hull of the segmentation
                c_new = cvConvexHull2(c, mem_storage, CV_CLOCKWISE, 1);
            cvSubstituteContour(scanner, c_new);
            numCont++;
        }
    }
    contours = cvEndFindContours(&scanner);

    // PAINT THE FOUND REGIONS BACK INTO THE IMAGE
    cvZero(mask);
    IplImage *maskTemp;

    // CALC CENTER OF MASS AND/OR BOUNDING RECTANGLES
    if (num != NULL) {
        int N = *num, numFilled = 0, i = 0;
        CvMoments moments;
        double M00, M01, M10;
        maskTemp = cvCloneImage(mask);
        for (i = 0, c = contours; c != NULL; c = c->h_next, i++) {
            if (i < N) {  // only process up to *num of them
                cvDrawContours(maskTemp, c, CV_CVX_WHITE, CV_CVX_WHITE, -1, CV_FILLED, 8);
                // find the center of each contour
                if (centers != NULL) {
                    cvMoments(maskTemp, &moments, 1);
                    M00 = cvGetSpatialMoment(&moments, 0, 0);
                    M10 = cvGetSpatialMoment(&moments, 1, 0);
                    M01 = cvGetSpatialMoment(&moments, 0, 1);
                    centers[i].x = (int)(M10/M00);
                    centers[i].y = (int)(M01/M00);
                }
                // bounding rectangles around blobs
                if (bbs != NULL) {
                    bbs[i] = cvBoundingRect(c);
                }
                cvZero(maskTemp);
                numFilled++;
            }
            // draw filled contours into the central mask
            cvDrawContours(mask, c, CV_CVX_WHITE, CV_CVX_WHITE, -1, CV_FILLED, 8);
        }  // end looping over contours
        *num = numFilled;
        cvReleaseImage(&maskTemp);
    } else {
        for (c = contours; c != NULL; c = c->h_next) {
            cvDrawContours(mask, c, CV_CVX_WHITE, CV_CVX_BLACK, -1, CV_FILLED, 8);
        }
    }
}

IplImage* pFrame = NULL;
IplImage* pFrameHSV = NULL;
IplImage* pFrImg = NULL;
CvCapture* pCapture = NULL;
int nFrmNum = 0;
unsigned cbBounds[CHANNELS] = {5, 5, 5};  // one learning bound per channel
int height, width;
int nchannels;
int minMod[3] = {30, 30, 30}, maxMod[3] = {30, 30, 30};

int main(int argc, char* argv[])
{
    // create windows
    cvNamedWindow("video", 1);
    cvNamedWindow("HSV", 1);
    cvNamedWindow("foreground", 1);
    // arrange the windows
    cvMoveWindow("video", 30, 0);
    cvMoveWindow("HSV", 360, 0);
    cvMoveWindow("foreground", 690, 0);

    // open the video file
    if (!(pCapture = cvCaptureFromFile("tingche.avi"))) {
        fprintf(stderr, "Can not open video file %s\n", "tingche.avi");
        return -2;
    }

    int j;
    // read the video frame by frame
    while ((pFrame = cvQueryFrame(pCapture)) != NULL) {
        nFrmNum++;
        cvShowImage("video", pFrame);
        if (nFrmNum == 1) {
            height = pFrame->height;
            width = pFrame->width;
            nchannels = pFrame->nChannels;
            pFrameHSV = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
            pFrImg = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
            TcodeBook = new codeBook[width*height+1];
            for (j = 0; j < width*height; j++) {
                TcodeBook[j].cb = NULL;
                TcodeBook[j].numEntries = 0;
                TcodeBook[j].t = 0;
            }
        }
        if (nFrmNum <= 30) {
            // cvCvtColor(pFrame, pFrameHSV, CV_BGR2HSV);  // optional color conversion
            cvCopyImage(pFrame, pFrameHSV);  // BGR is used directly here
            // learn the background
            // NOTE: indexing by j*nchannels assumes no row padding
            // (widthStep == width * nChannels)
            for (j = 0; j < width*height; j++)
                update_codebook((uchar*)pFrameHSV->imageData + j*nchannels,
                                TcodeBook[j], cbBounds, 3);
        } else {
            // cvCvtColor(pFrame, pFrameHSV, CV_BGR2HSV);
            cvCopyImage(pFrame, pFrameHSV);
            if (nFrmNum % 20 == 0) {  // keep updating the model periodically
                for (j = 0; j < width*height; j++)
                    update_codebook((uchar*)pFrameHSV->imageData + j*nchannels,
                                    TcodeBook[j], cbBounds, 3);
            }
            if (nFrmNum % 40 == 0) {  // temporal filtering of stale codewords
                for (j = 0; j < width*height; j++)
                    clear_stale_entries(TcodeBook[j]);
            }
            for (j = 0; j < width*height; j++) {
                if (background_diff((uchar*)pFrameHSV->imageData + j*nchannels,
                                    TcodeBook[j], 3, minMod, maxMod)) {
                    pFrImg->imageData[j] = (char)255;
                } else {
                    pFrImg->imageData[j] = 0;
                }
            }
            // connected_Components(pFrImg, 1, 20, NULL, NULL, NULL);
            cvShowImage("foreground", pFrImg);
            cvShowImage("HSV", pFrameHSV);
        }
        if (cvWaitKey(2) >= 0)
            break;
    }  // end of while loop

    // free the codebooks
    for (j = 0; j < width*height; j++) {
        for (int k = 0; k < TcodeBook[j].numEntries; k++)
            delete TcodeBook[j].cb[k];
        if (TcodeBook[j].cb)
            delete [] TcodeBook[j].cb;
    }
    if (TcodeBook)
        delete [] TcodeBook;

    cvReleaseCapture(&pCapture);
    cvReleaseImage(&pFrameHSV);
    cvReleaseImage(&pFrImg);

    // destroy windows
    cvDestroyWindow("video");
    cvDestroyWindow("HSV");
    cvDestroyWindow("foreground");
    return 0;
}
In testing, the algorithm adapts reasonably well to background changes, but the extracted foreground contains many holes, so some work is needed in the subsequent region processing. This example, based on the one in OpenCV, only uses the image's intensity values to update the background model, so it does not handle shadows, changes in ambient lighting, and the like very well.
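One low-effort way to reduce the holes is to enable the connected_Components call that is commented out in main above; it runs a morphological open/close on the mask and discards small blobs. The parameter values below are just a starting point:

// Clean the raw mask in place: poly1_hull0 = 1 selects polygonal
// approximation of the surviving contours, and perimScale = 20 drops
// any contour shorter than (mask->width + mask->height) / 20.
connected_Components(pFrImg, 1, 20, NULL, NULL, NULL);
cvShowImage("foreground", pFrImg);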
Adjusting the codebook parameters can improve the results further; see the original CodeBook paper for details.