• Learning OpenCV: BOW Feature Extraction Functions (Feature Points)


    from:  http://www.xuebuyuan.com/582331.html

    A simple method of classification via feature points:

    I. Train

    1. Extract features from the +/- samples; the number of SIFT features per image varies (assume each feature is 128-dimensional).

    2. Use a clustering method (e.g. K-means) to cluster the variable number of features into a fixed number (say 10) of words, i.e. a BOW (bag of words).

    (This article mainly covers the work above!)

    3. Normalize, and build the histogram over these 10 classes, e.g. [0.1, 0.2, 0.7, 0 ... 0];

    4. Feed each image's 10-word histogram as the feature_instance, together with its (hand-labeled) label (+/-), into SVM training.

    II. Predict

    1. Extract the features of test_img (e.g. 137 of them).

    2. Compute the distance from each feature to each of the 10 cluster centers (e.g. 128-dimensional Euclidean distance) and assign the feature to the nearest class.

    3. Normalize, and build the histogram over the 10 classes, e.g. [0, 0.2, 0.2, 0.6, 0 ... 0];

    4. Apply SVM_predict to get the result (a rough sketch of steps 3 and 4 follows below).
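    Since this article only implements steps 1 and 2, here is a minimal sketch of how steps 3 and 4 might look (assuming OpenCV 2.4.x; the function name trainAndPredict and the containers trainImages/trainLabels are hypothetical). OpenCV's BOWImgDescriptorExtractor can take the vocabulary produced by BOWKMeansTrainer and return the normalized word histogram of an image directly, which then feeds CvSVM:

    #include "opencv2/features2d/features2d.hpp"
    #include "opencv2/nonfree/nonfree.hpp"
    #include "opencv2/ml/ml.hpp"
    #include <iostream>
    #include <vector>

    using namespace cv;
    using namespace std;

    //sketch of steps 3-4; "vocabulary" is the ClusterNum x 128 Mat returned by
    //BOWKMeansTrainer::cluster(); "trainLabels" is a CV_32FC1 column Mat of
    //hand-made +/- labels, one row per training image
    void trainAndPredict(const Mat& vocabulary,
    	const vector<Mat>& trainImages, const Mat& trainLabels,
    	const Mat& testImg)
    {
    	initModule_nonfree();
    	Ptr<FeatureDetector> detector = FeatureDetector::create( "SIFT" );
    	Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create( "SIFT" );
    	Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create( "BruteForce" );

    	//BOWImgDescriptorExtractor assigns each SIFT feature to its nearest
    	//word and returns the normalized ClusterNum-bin histogram (steps 2-3)
    	BOWImgDescriptorExtractor bowExtractor(extractor, matcher);
    	bowExtractor.setVocabulary(vocabulary);

    	Mat trainData;
    	for (size_t i = 0; i < trainImages.size(); i++)
    	{
    		vector<KeyPoint> keypoints;
    		detector->detect(trainImages[i], keypoints);
    		Mat bowHist;	//1 x ClusterNum, already normalized
    		bowExtractor.compute(trainImages[i], keypoints, bowHist);
    		trainData.push_back(bowHist);
    	}

    	//step 4: SVM training on the histograms and the hand-made labels
    	CvSVM svm;
    	svm.train(trainData, trainLabels);

    	//predict: the same histogram computation for the test image
    	vector<KeyPoint> testKeypoints;
    	detector->detect(testImg, testKeypoints);
    	Mat testHist;
    	bowExtractor.compute(testImg, testKeypoints, testHist);
    	cout << "predicted label: " << svm.predict(testHist) << endl;
    }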

    Implementing feature clustering (BOW) with OpenCV

    First, an introduction to OpenCV's generic interfaces for feature descriptors and BOW.

    The main generic interfaces are:

     

    1. Keypoint detection

    Ptr<FeatureDetector> FeatureDetector::create(const string& detectorType)

    	Ptr<FeatureDetector> FeatureDetector::create(const string& detectorType)
    // 	"FAST" – FastFeatureDetector 
    // 	"STAR" – StarFeatureDetector 
    // 	"SIFT" – SIFT (nonfree module)//requires initModule_nonfree() to be called first
    // 	"SURF" – SURF (nonfree module)//same as above;
    // 	"ORB" – ORB 
    // 	"MSER" – MSER 
    // 	"GFTT" – GoodFeaturesToTrackDetector 
    // 	"HARRIS" – GoodFeaturesToTrackDetector with Harris detector enabled 
    // 	"Dense" – DenseFeatureDetector 
    // 	"SimpleBlob" – SimpleBlobDetector 

    Using the interface above, the different keypoint detectors were tested.

    Keypoints are detected on the same image before and after a horizontal flip (a sketch of such a test follows the list below);

    the coordinate type of the detected keypoints is pt: int / float (depending on the properties of the KeyPoint),

    and the keypoint counts are num1 and num2 respectively:

     "FAST" – FastFeatureDetector           pt:int (num1:615  num2:618)
     "STAR" – StarFeatureDetector           pt:int (num1:43   num2:42 )
     "SIFT" – SIFT (nonfree module)          pt:float(num1:155  num2:135)            //必须使用 initModule_nonfree()初始化
     "SURF" – SURF (nonfree module)     pt:float(num1:344  num2:342)           
    //同上; 
     "ORB" – ORB                                        pt:float(num1:496  num2:497)
     "MSER" – MSER                                 pt:float(num1:51   num2:45 )
     "GFTT" – GoodFeaturesToTrackDetector        pt:int (num1:744  num2:771)
     "HARRIS" – GoodFeaturesToTrackDetector with Harris detector enabled         pt:float(num1:162  num2:160)
     "Dense" – DenseFeatureDetector          pt:int (num1:3350 num2:3350)

    2. Descriptor extraction

    Ptr<DescriptorExtractor> DescriptorExtractor::create(const string& descriptorExtractorType)

    //  Ptr<DescriptorExtractor> DescriptorExtractor::create(const string& descriptorExtractorType)	
    // 	"SIFT" – SIFT 
    // 	"SURF" – SURF 
    // 	"ORB" – ORB 
    // 	"BRIEF" – BriefDescriptorExtractor 
    

    3. Descriptor matching

    Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create(const string& descriptorMatcherType)

    // 	descriptorMatcherType – Descriptor matcher type. 
    //	Now the following matcher types are supported: 
    // 		BruteForce (it uses L2 ) 
    // 		BruteForce-L1 
    // 		BruteForce-Hamming 
    // 		BruteForce-Hamming(2) 
    // 		FlannBased 
    	Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( "BruteForce" );
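    A practical note: the matcher type should fit the descriptor type. "BruteForce" (L2) and "FlannBased" suit float descriptors such as SIFT/SURF, while binary descriptors such as ORB/BRIEF are normally matched with the Hamming variants, e.g.:

    	Ptr<DescriptorMatcher> orbMatcher = DescriptorMatcher::create( "BruteForce-Hamming" );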

    4. class BOWTrainer

    class BOWKMeansTrainer : public BOWTrainer (clustering with the k-means algorithm)

    BOWKMeansTrainer::BOWKMeansTrainer(int clusterCount, const TermCriteria& termcrit=TermCriteria(), int attempts=3, int flags=KMEANS_PP_CENTERS)

    The parameters are the same as for kmeans().
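    A minimal usage sketch (assuming OpenCV 2.4.x; buildVocabulary and allDescriptors are hypothetical names for a helper function and for the N x 128 CV_32F Mat of SIFT descriptors collected from the training images; headers as in the full listing below):

    	//sketch: build the visual vocabulary with k-means++ seeding
    	Mat buildVocabulary(const Mat& allDescriptors)
    	{
    		//10 words; stop after 10 iterations or when eps reaches 0.1; 3 attempts
    		TermCriteria crit(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 0.1);
    		BOWKMeansTrainer bowTrainer(10, crit, 3, KMEANS_PP_CENTERS);
    		bowTrainer.add(allDescriptors);	//may also be called once per training image
    		return bowTrainer.cluster();	//returns a 10 x 128 Mat of cluster centers
    	}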

    Code implementation:

    1. Draw the keypoints.

    2. K-means clustering of the keypoints; each color represents one cluster.

    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/calib3d/calib3d.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/features2d/features2d.hpp"
    #include "opencv2/nonfree/nonfree.hpp"
    
    #include <iostream>
    
    using namespace cv;
    using namespace std;
    
    #define ClusterNum 10
    
    void DrawAndMatchKeypoints(const Mat& Img1,const Mat& Img2,const vector<KeyPoint>& Keypoints1,
    	const vector<KeyPoint>& Keypoints2,const Mat& Descriptors1,const Mat& Descriptors2)
    {
    	Mat keyP1,keyP2;
    	drawKeypoints(Img1,Keypoints1,keyP1,Scalar::all(-1),0);
    	drawKeypoints(Img2,Keypoints2,keyP2,Scalar::all(-1),0);
    	putText(keyP1, "drawKeyPoints", cvPoint(10,30), FONT_HERSHEY_SIMPLEX, 1 ,Scalar :: all(-1));
    	putText(keyP2, "drawKeyPoints", cvPoint(10,30), FONT_HERSHEY_SIMPLEX, 1 ,Scalar :: all(-1));
    	imshow("img1 keyPoints",keyP1);
    	imshow("img2 keyPoints",keyP2);
    
    	Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( "BruteForce" );
    	vector<DMatch> matches;
    	descriptorMatcher->match( Descriptors1, Descriptors2, matches );
    	Mat show;
    	drawMatches(Img1,Keypoints1,Img2,Keypoints2,matches,show,Scalar::all(-1),CV_RGB(255,255,255),Mat(),4);
    	putText(show, "drawMatchKeyPoints", cvPoint(10,30), FONT_HERSHEY_SIMPLEX, 1 ,Scalar :: all(-1));  
    	imshow("match",show);
    }
    
    //Test OpenCV's class BOWTrainer
    void BOWKeams(const Mat& img, const vector<KeyPoint>& Keypoints, 
    	const Mat& Descriptors, Mat& centers)
    {
    	//k-means clustering for BOW;
    	BOWKMeansTrainer bowK(ClusterNum, 
    		cvTermCriteria (CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 0.1),3,2);
    	centers = bowK.cluster(Descriptors);
    	cout<<endl<<"< cluster num: "<<centers.rows<<" >"<<endl;
    	
    	Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( "BruteForce" );
    	vector<DMatch> matches;
    	descriptorMatcher->match(Descriptors,centers,matches);//match(queryDescriptors, trainDescriptors): the first argument holds the descriptors to classify, the second the cluster centers;
    	Mat demoCluster;
    	img.copyTo(demoCluster);
    	
    	//assign one color to each keypoint cluster
    	Scalar color[]={CV_RGB(255,255,255),
         CV_RGB(255,0,0),CV_RGB(0,255,0),CV_RGB(0,0,255),
         CV_RGB(255,255,0),CV_RGB(255,0,255),CV_RGB(0,255,255),
         CV_RGB(123,123,0),CV_RGB(0,123,123),CV_RGB(123,0,123)};
    
    
    	for (vector<DMatch>::iterator iter=matches.begin();iter!=matches.end();iter++)
    	{
    		cout<<"< descriptorsIdx:"<<iter->queryIdx<<"  centersIdx:"<<iter->trainIdx
    			<<" distincs:"<<iter->distance<<" >"<<endl;
    		Point center= Keypoints[iter->queryIdx].pt;
    		circle(demoCluster,center,2,color[iter->trainIdx],-1);
    	}
    	putText(demoCluster, "KeyPoints Clustering: one color per cluster",
    		cvPoint(10,30), FONT_HERSHEY_SIMPLEX, 1 ,Scalar :: all(-1));
    	imshow("KeyPoints Clustering",demoCluster);
    	
    }
    
    
    
    
    int main()
    {
    	cv::initModule_nonfree();//before creating SIFT/SURF, initModule_<modulename>() must be called first;
    
    	cout << "< Creating detector, descriptor extractor and descriptor matcher ...";
    	Ptr<FeatureDetector> detector = FeatureDetector::create( "SIFT" );
    
    	Ptr<DescriptorExtractor> descriptorExtractor = DescriptorExtractor::create( "SIFT" );
    
    	Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( "BruteForce" );
    
    
    
    	cout << ">" << endl;
    
    	if( detector.empty() || descriptorExtractor.empty() )
    	{
    		cout << "Can not create detector or descriptor exstractor or descriptor matcher of given types" << endl;
    		return -1;
    	}
    	cout << endl << "< Reading images..." << endl;
    	Mat img1 = imread("D:/demo0.jpg");
    	Mat img2 = imread("D:/demo1.jpg");
    	cout<<endl<<">"<<endl;
    
    
    	//detect keypoints;
    	cout << endl << "< Extracting keypoints from images..." << endl;
    	vector<KeyPoint> keypoints1,keypoints2;
    	detector->detect( img1, keypoints1 );
    	detector->detect( img2, keypoints2 );
    	cout <<"img1:"<< keypoints1.size() << " points  img2:" <<keypoints2.size() 
    		<< " points" << endl << ">" << endl;
    	
    	//compute descriptors for keypoints;
    	cout << "< Computing descriptors for keypoints from images..." << endl;
    	Mat descriptors1,descriptors2;
    	descriptorExtractor->compute( img1, keypoints1, descriptors1 );
    	descriptorExtractor->compute( img2, keypoints2, descriptors2 );
    
    	cout<<endl<<"< Descriptoers Size: "<<descriptors2.size()<<" >"<<endl;
    	cout<<endl<<"descriptor's col: "<<descriptors2.cols<<endl
    		<<"descriptor's row: "<<descriptors2.rows<<endl;
    	cout << ">" << endl;
    
    	//Draw And Match img1,img2 keypoints
    	//matching is performed on the keypoints' descriptors;
    	DrawAndMatchKeypoints(img1,img2,keypoints1,keypoints2,descriptors1,descriptors2);
    
    	Mat center;
    	//extract keypoints from img1 and cluster them
    	//test OpenCV's class BOWTrainer
    	BOWKeams(img1,keypoints1,descriptors1,center);
    
    
    	waitKey();
    
    }

    Implementing DrawKeypoints with Qt:

    void Qt_test1::on_DrawKeypoints_clicked()
    {
    	//initModule_nonfree();
    	Ptr<FeatureDetector> detector = FeatureDetector::create( "FAST" );
    	vector<KeyPoint> keypoints;
    	detector->detect( src, keypoints );
    
    	Mat DrawKeyP;
    	drawKeypoints(src,keypoints,DrawKeyP,Scalar::all(-1),0);
    	putText(DrawKeyP, "drawKeyPoints", cvPoint(10,30), 
    		FONT_HERSHEY_SIMPLEX, 0.5 ,Scalar :: all(255));
    	cvtColor(DrawKeyP, image, CV_RGB2RGBA);
    	QImage img = QImage((const unsigned char*)(image.data), 
    		image.cols, image.rows, QImage::Format_RGB32);
    	QLabel *label = new QLabel(this);
    	label->move(50, 50);//position of the image within the window;
    	label->setPixmap(QPixmap::fromImage(img));
    	label->resize(label->pixmap()->size());	
    	label->show();
    }

    Since initModule_nonfree() kept failing, SIFT and SURF keypoints could not be extracted,

    and the clustering could not be run either, because the BOW k-means call BOWKMeansTrainer bowK(ClusterNum, cvTermCriteria (CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 0.1), 3, 2); kept failing as well. I don't know how to solve this yet ~~~~~(>_<)~~~~ and need to keep studying.

  • Original article: https://www.cnblogs.com/zengcv/p/5582965.html