OpenCV Image Transformations - 2


    1. Classic Hough Transform

             The Hough transform is a feature-extraction technique in image processing. It works in a parameter space: by finding local maxima of the accumulated votes, it obtains the set of parameters that fits a particular shape, and returns that set as the result.

             By transforming between two coordinate spaces, curves or straight lines sharing the same shape in one space are mapped to a single point in the other space, where they pile up into a peak. This turns the problem of detecting an arbitrary shape into the problem of finding peaks in the accumulator.

             In OpenCV, the Hough line transform is a method for finding straight lines. Before applying it, the image must first go through edge detection; the direct input to the Hough transform is the thresholded binary image.

             OpenCV offers three different Hough line transforms: the standard Hough transform (SHT), the multi-scale Hough transform (MSHT), and the progressive probabilistic Hough transform (PPHT).

            The PPHT is an improvement on the standard Hough transform. Instead of accumulating every point into the accumulator plane, it accumulates only a small fraction of them; the idea is that if a peak is going to be high enough, a fraction of the points (and of the time) is enough to find it.

             The basic principle of the Hough line transform: any straight line in Cartesian coordinates can be written in polar form as rho = x*cos(theta) + y*sin(theta). A single point maps to a curve in (rho, theta) space made up of the parameters of every line passing through it. If enough points in the image lie on the same line, their curves intersect at a common point, and the (rho, theta) value at that intersection defines the line. With a suitable accumulator threshold set, that line can be extracted.
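
             As a rough sketch of the voting step described above (my own illustration, not OpenCV's implementation; the function name houghVote and the binning are made up), every edge pixel votes for each (rho, theta) bin that its sinusoid passes through, and peaks in the accumulator are the detected lines:

    // Minimal sketch of Hough-line voting on a binary edge image (CV_8UC1).
    // Bin sizes and names are illustrative only, not OpenCV's internals.
    #include <opencv2/opencv.hpp>
    #include <cmath>
    using namespace cv;
    
    Mat houghVote(const Mat& edges, int thetaBins = 180)
    {
       double rhoMax = std::hypot(edges.rows, edges.cols);               // largest possible |rho|
       Mat acc = Mat::zeros(2*(int)rhoMax + 1, thetaBins, CV_32SC1);     // accumulator over (rho, theta)
       for(int y = 0; y < edges.rows; y++)
           for(int x = 0; x < edges.cols; x++)
           {
               if(edges.at<uchar>(y,x) == 0) continue;                   // only edge pixels vote
               for(int t = 0; t < thetaBins; t++)
               {
                   double theta = CV_PI*t/thetaBins;
                   int rho = cvRound(x*cos(theta) + y*sin(theta)) + (int)rhoMax; // shift so index >= 0
                   acc.at<int>(rho, t)++;                                // one vote per bin the curve crosses
               }
           }
       return acc;  // local maxima above a threshold correspond to lines
    }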

             API: void HoughLines(source image, output vector of detected lines, double rho: distance resolution in pixels, double theta: angle resolution in radians, int threshold: accumulator threshold, double srn: divisor of the distance resolution for the multi-scale transform, double stn: divisor of the angle resolution for the multi-scale transform);

             Note: the source must be a single-channel, binarized image. The output is not a Mat but a vector of lines defined by polar parameter pairs; the vector holds one (rho, theta) pair per detected line. The distance resolution is the step by which the distance grows during the line search, and the angle resolution is the step by which the angle grows. The threshold parameter is the number of votes a line must reach in the accumulator before it is reported. If the two multi-scale divisors (srn and stn) are both 0, this is the classic Hough transform.

             Example code for the classic Hough transform

    Mat srcImage,srcImageGray,cannyImage;
    vector<Vec2f>lines;
    
    //three trackbar-controlled parameters: distance resolution rho, angle resolution theta, accumulator threshold
    const int g_rhoMax = 10;
    int g_rhoValue;
    
    const int g_thetaMax = 200;
    int g_thetaValue;
    
    const int g_thresholdMax = 250;
    int g_thresholdValue;
    
    void onTrackBarrho(int pos,void* userData);
    void onTrackBartheta(int pos,void* userData);
    void onTrackBarThreshold(int pos,void* userData);
    
    int main(int argc,char* argv[])
    {
       srcImage = imread("F:\\opencv\\OpenCVImage\\HoughLine.jpg");
       if(srcImage.channels() == 1)
       {
           srcImageGray = srcImage.clone();
       }
       else
       {
           cvtColor(srcImage, srcImageGray, CV_RGB2GRAY);
       }
       
       namedWindow("src image");
       namedWindow("line image");
       
       g_rhoValue = 0;
       g_thetaValue = 100;
       g_thresholdValue = 150;
       createTrackbar("rho value", "line image", &g_rhoValue, g_rhoMax,onTrackBarrho,0);
       createTrackbar("theta value", "line image", &g_thetaValue, g_thetaMax,onTrackBartheta ,0);
       createTrackbar("threshold value", "line image", &g_thresholdValue, g_thresholdMax,onTrackBarThreshold,0);
       onTrackBarThreshold(g_thresholdValue,0);
       
       imshow("src image", srcImageGray);
       
       moveWindow("src image", 0, 0);
       moveWindow("line image", srcImageGray.cols*2, 0);
       
       waitKey(0);
       return 0;
    }
    
    void onTrackBarrho(int pos,void* userData)
    {
       int rhoValue = g_rhoValue+1;
       int thetaValue = g_thetaValue+1;
       int threshold = g_thresholdValue+1;
       double theta = CV_PI/(double)thetaValue;
       Canny(srcImageGray, cannyImage, 50, 200,3);
       HoughLines(cannyImage, lines, rhoValue, theta, threshold,0,0);
       imshow("canny image", cannyImage);
       moveWindow("canny image", srcImageGray.cols, 0);
       Mat tempImage(srcImageGray.rows,srcImageGray.cols,CV_8UC3,Scalar(255,0,0));
       for(size_t i = 0; i < lines.size(); i++)
       {
           float rhoResult = lines[i][0];
           float thetaResult = lines[i][1];
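           // (x0, y0) = (rho*cos(theta), rho*sin(theta)) is the foot of the perpendicular
           // from the origin to the line; the two endpoints are taken 1000 px away along
           // the line direction (-sin(theta), cos(theta)) so the segment spans the image.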
           Point pt1,pt2;
           double a = cos(thetaResult);
           double b = sin(thetaResult);
           double x0 = a*rhoResult;
           double y0 = b*rhoResult;
           pt1.x = cvRound(x0 + 1000*(-b));
           pt1.y = cvRound(y0 + 1000*(a));
           pt2.x = cvRound(x0 - 1000*(-b));
           pt2.y = cvRound(y0 - 1000*(a));
           line(tempImage, pt1, pt2, Scalar(0,0,255),1);
       }
       imshow("line image", tempImage);
    }
    void onTrackBartheta(int pos,void* userData)
    {
       int rhoValue = g_rhoValue+1;
       int thetaValue = g_thetaValue+1;
       int threshold = g_thresholdValue+1;
       double theta = CV_PI/(double)thetaValue;
       Canny(srcImageGray, cannyImage, 50, 200,3);
       HoughLines(cannyImage, lines, rhoValue, theta, threshold,0,0);
       imshow("canny image", cannyImage);
       moveWindow("canny image", srcImageGray.cols, 0);
       Mat tempImage(srcImageGray.rows,srcImageGray.cols,CV_8UC3,Scalar(255,0,0));
       for(size_t i = 0; i < lines.size(); i++)
       {
           float rhoResult = lines[i][0];
           float thetaResult = lines[i][1];
           Point pt1,pt2;
           double a = cos(thetaResult);
           double b = sin(thetaResult);
           double x0 = a*rhoResult;
           double y0 = b*rhoResult;
           pt1.x = cvRound(x0 + 1000*(-b));
           pt1.y = cvRound(y0 + 1000*(a));
           pt2.x = cvRound(x0 - 1000*(-b));
           pt2.y = cvRound(y0 - 1000*(a));
           line(tempImage, pt1, pt2, Scalar(0,0,255),1);
       }
       imshow("line image", tempImage);
    }
    void onTrackBarThreshold(int pos,void* userData)
    {
       int rhoValue = g_rhoValue+1;
       int thetaValue = g_thetaValue+1;
       int threshold = g_thresholdValue+1;
       double theta = CV_PI/(double)thetaValue;
       Canny(srcImageGray, cannyImage, 50, 200,3);
       HoughLines(cannyImage, lines, rhoValue, theta, threshold,0,0);
       imshow("canny image", cannyImage);
       moveWindow("canny image", srcImageGray.cols, 0);
       Mat tempImage(srcImageGray.rows,srcImageGray.cols,CV_8UC3,Scalar(255,0,0));
       for(size_t i = 0; i < lines.size(); i++)
       {
           float rhoResult = lines[i][0];
           float thetaResult = lines[i][1];
           Point pt1,pt2;
           double a = cos(thetaResult);
           double b = sin(thetaResult);
           double x0 = a*rhoResult;
           double y0 = b*rhoResult;
           pt1.x = cvRound(x0 + 1000*(-b));
           pt1.y = cvRound(y0 + 1000*(a));
           pt2.x = cvRound(x0 - 1000*(-b));
           pt2.y = cvRound(y0 - 1000*(a));
           line(tempImage, pt1, pt2, Scalar(0,0,255),1);
       }
       imshow("line image", tempImage);
    }

    2. Progressive Probabilistic Hough Transform

             The progressive probabilistic Hough transform is the variant used most often: compared with the standard Hough transform it is faster to compute, and its results are accurate enough for most purposes.

             API: void HoughLinesP(source image, output vector of line segments, double rho: distance resolution, double theta: angle resolution in radians, int threshold: accumulator threshold, double minLineLength: minimum segment length, double maxLineGap: maximum allowed gap between points on the same line);

             Note: the minimum segment length defaults to 0; segments shorter than this are not returned. The maximum gap also defaults to 0; points farther apart than this are not joined into the same line. Each output element is a four-element vector (x1, y1, x2, y2) giving the start and end points of a segment, so the result is stored as vector<Vec4i> lines.

    Example code

    Mat srcImage,srcGrayImage,cannyImage;
    vector<Vec4i>lines;
    
    //lowThreshold for Canny; upThreshold is set to three times lowThreshold
    const int g_lowThresholdMax = 84;
    int g_lowThresholdValue;
    void onTrackBarCannyLowThreshold(int pos,void* userData);
    #define CALC_UPTHRESHOLD_VALUE(value) ((value)*3)
    
    //Canny Sobel aperture size
    const int g_cannySobelSizeMax = 2;//aperture = value*2+3, i.e. 3, 5 or 7 (Canny only accepts 3..7)
    int g_cannySobelSizeValue;
    void onTrackBarCannySobelSize(int pos,void* userData);
    
    
    //Hough distance resolution (rho)
    const int g_houghlineRhoMax = 9;
    int g_houghlineRhoValue;
    void onTrackBarHoughlineRhoValue(int pos,void* userData);
    
    //HoughLinesP angle resolution (theta)
    const int g_houghlineThetaMax = 179;
    int g_houghlineThetaValue;
    void onTrackBarHoughlineThetaValue(int pos,void* userData);
    
    //accumulator threshold
    const int g_houghlineThresholdMax = 299;
    int g_houghlineThresholdValue;
    void onTrackBarHoughlineThresholdValue(int pos,void* userData);
    
    //minimum line segment length
    const int g_houghlineMindistMax = 39;
    int g_houghlineMindistValue;
    void onTrackBarHoughlineMindistValue(int pos,void* userData);
    
    
    int main(int argc,char* argv[])
    {
       srcImage = imread("F:\\opencv\\OpenCVImage\\HoughLineP.jpg");
       if(srcImage.channels() == 1)
       {
           srcGrayImage = srcImage.clone();
       }
       else
       {
           cvtColor(srcImage, srcGrayImage, CV_RGB2GRAY);
       }
       
       namedWindow("src image");
       namedWindow("canny image");
       namedWindow("dst image");
       
       g_lowThresholdValue = 84;
       g_cannySobelSizeValue = 0;
       createTrackbar("low threshold", "canny image", &g_lowThresholdValue, g_lowThresholdMax,onTrackBarCannyLowThreshold,0);
       createTrackbar("sobel size", "canny image", &g_cannySobelSizeValue, g_cannySobelSizeMax,onTrackBarCannySobelSize,0);
       onTrackBarCannySobelSize(g_cannySobelSizeValue, 0);
       
       g_houghlineRhoValue = 1;
       g_houghlineThetaValue = 179;
       g_houghlineMindistValue = 19;
       g_houghlineThresholdValue = 10;
       createTrackbar("rho value", "dst image", &g_houghlineRhoValue, g_houghlineRhoMax,onTrackBarHoughlineRhoValue,0);
       createTrackbar("theta value", "dst image", &g_houghlineThetaValue,g_houghlineThetaMax,onTrackBarHoughlineThetaValue,0);
       createTrackbar("threshold value", "dst image", &g_houghlineThresholdValue, g_houghlineThresholdMax,onTrackBarHoughlineThresholdValue,0);
       createTrackbar("min dist", "dst image", &g_houghlineMindistValue, g_houghlineMindistMax,onTrackBarHoughlineMindistValue,0);
       onTrackBarHoughlineRhoValue(g_houghlineRhoValue, 0);
       
       imshow("src image", srcImage);
       
       moveWindow("src image", 0, 0);
       moveWindow("canny image", srcGrayImage.cols, 0);
       moveWindow("dst image", srcImage.cols*2, 0);
       
       waitKey(0);
       return 0;
    }
    
    
    void onTrackBarCannyLowThreshold(int pos,void* userData)
    {
       int sobelSize = g_cannySobelSizeValue*2+3;
       int lowThreshold = g_lowThresholdValue+1;
       int upThreshold = CALC_UPTHRESHOLD_VALUE(lowThreshold);
       Canny(srcGrayImage, cannyImage, lowThreshold, upThreshold,sobelSize);
       imshow("canny image", cannyImage);
    }
    
    void onTrackBarCannySobelSize(int pos,void* userData)
    {
       int sobelSize = g_cannySobelSizeValue*2+3;
       int lowThreshold = g_lowThresholdValue+1;
       int upThreshold = CALC_UPTHRESHOLD_VALUE(lowThreshold);
       Canny(srcGrayImage, cannyImage, lowThreshold, upThreshold,sobelSize);
       imshow("canny image", cannyImage);
    }
    
    void onTrackBarHoughlineRhoValue(int pos,void* userData)
    {
       double rhoValue = g_houghlineRhoValue+1;
       double thetaValue = g_houghlineThetaValue+1;
       int thresholdValue = g_houghlineThresholdValue+1;
       int minDist = g_houghlineMindistValue+1;
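       // arguments: rho and theta resolution, accumulator threshold, and minimum segment
       // length (the maxLineGap parameter is left at its default of 0)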
       HoughLinesP(cannyImage, lines, rhoValue, CV_PI/thetaValue, thresholdValue,minDist);
       Mat dstImage(srcGrayImage.rows,srcGrayImage.cols,CV_8UC3,Scalar(0,0,0));
       for (size_t i = 0; i < lines.size(); i++) {
            line(dstImage, Point(lines[i][0],lines[i][1]), Point(lines[i][2],lines[i][3]),Scalar(0,0,255),1,LINE_4);
       }
       imshow("dst image", dstImage);
    }
    
    void onTrackBarHoughlineThetaValue(int pos,void* userData)
    {
       double rhoValue = g_houghlineRhoValue+1;
       double thetaValue = g_houghlineThetaValue+1;
       int thresholdValue = g_houghlineThresholdValue+1;
       int minDist = g_houghlineMindistValue+1;
       HoughLinesP(cannyImage, lines, rhoValue, CV_PI/thetaValue, thresholdValue,minDist);
       Mat dstImage(srcGrayImage.rows,srcGrayImage.cols,CV_8UC3,Scalar(0,0,0));
       for (size_t i = 0; i < lines.size(); i++) {
            line(dstImage, Point(lines[i][0],lines[i][1]), Point(lines[i][2],lines[i][3]),Scalar(0,0,255),1,LINE_4);
       }
       imshow("dst image", dstImage);
    }
    
    void onTrackBarHoughlineThresholdValue(int pos,void* userData)
    {
       double rhoValue = g_houghlineRhoValue+1;
       double thetaValue = g_houghlineThetaValue+1;
       int thresholdValue = g_houghlineThresholdValue+1;
       int minDist = g_houghlineMindistValue+1;
       HoughLinesP(cannyImage, lines, rhoValue, CV_PI/thetaValue, thresholdValue,minDist);
       Mat dstImage(srcGrayImage.rows,srcGrayImage.cols,CV_8UC3,Scalar(0,0,0));
       for (size_t i = 0; i < lines.size(); i++) {
            line(dstImage, Point(lines[i][0],lines[i][1]), Point(lines[i][2],lines[i][3]),Scalar(0,0,255),1,LINE_4);
       }
       imshow("dst image", dstImage);
    }
    
    void onTrackBarHoughlineMindistValue(int pos,void* userData)
    {
       double rhoValue = g_houghlineRhoValue+1;
       double thetaValue = g_houghlineThetaValue+1;
       int thresholdValue = g_houghlineThresholdValue+1;
       int minDist = g_houghlineMindistValue+1;
       HoughLinesP(cannyImage, lines, rhoValue, CV_PI/thetaValue, thresholdValue,minDist);
       Mat dstImage(srcGrayImage.rows,srcGrayImage.cols,CV_8UC3,Scalar(0,0,0));
       for (size_t i = 0; i < lines.size(); i++) {
            line(dstImage, Point(lines[i][0],lines[i][1]), Point(lines[i][2],lines[i][3]),Scalar(0,0,255),1,LINE_4);
       }
       imshow("dst image", dstImage);
    }

    3. Hough Circle Transform

             The Hough circle transform follows essentially the same principle as the line transform: any point on a circle can be expressed in terms of the circle's center and radius, so each image point corresponds to a curve in a three-dimensional (center x, center y, radius) parameter space, and the intersection of many such curves identifies one particular circle.

             In OpenCV, the circle transform is solved with the Hough gradient method. Roughly, the procedure is: first run edge detection on the image; then compute the x/y gradient at every non-zero pixel of the result and accumulate votes along the line given by that gradient direction (between the specified minimum and maximum distance); the position of every non-zero pixel in the edge image is marked, and candidate centers are then selected from the peaks of this two-dimensional accumulator.

             The drawbacks of the Hough circle transform: if the thresholds are set low, the algorithm takes a long time; when concentric circles are closely spaced, it tends to keep only the circle with the largest radius; and the Hough gradient method can also produce noisy results in some cases.

             API: void HoughCircles(source image, output vector, int method: detection method, double dp: inverse ratio of the accumulator resolution to the image resolution, double minDist: minimum distance between circle centers, double param1: upper threshold for the internal Canny (the lower threshold defaults to half of it), double param2: accumulator threshold for circle centers, int minRadius, int maxRadius);

             Note: the source must be an 8-bit single-channel grayscale image. The output is a vector<Vec3f>; each element stores, in order, the center x, center y and radius r of a circle. The only detection method currently available is the Hough gradient method, HOUGH_GRADIENT; its default upper Canny threshold is 100, and the lower threshold is half of that. If the minimum center distance is too small, several neighboring circles merge into one; if it is too large, some circles are missed. If the accumulator threshold is too small, many non-existent circles are detected; the larger it is, the closer to a perfect circle a detection must be.

             In addition, the function may not find a radius that suits the application; the returned radius can be ignored and determined in an extra step.
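
             One possible extra step (a sketch only, not from the original post; the function name and the binning are my own) is to keep the returned center, look at the Canny edge pixels, and take the most common distance from the center as the radius:

    // Sketch: estimate a circle's radius from a known center and a Canny edge image.
    // Assumes `edges` is CV_8UC1; names and binning are illustrative.
    #include <opencv2/opencv.hpp>
    #include <algorithm>
    #include <vector>
    #include <cmath>
    using namespace cv;
    
    int estimateRadius(const Mat& edges, Point center, int maxRadius)
    {
       std::vector<int> hist(maxRadius+1, 0);                 // histogram of distances to edge pixels
       for(int y = 0; y < edges.rows; y++)
           for(int x = 0; x < edges.cols; x++)
           {
               if(edges.at<uchar>(y,x) == 0) continue;
               int r = cvRound(std::hypot(x-center.x, y-center.y));
               if(r <= maxRadius) hist[r]++;                  // vote for this radius
           }
       // the most frequent distance is taken as the radius
       return (int)(std::max_element(hist.begin(), hist.end()) - hist.begin());
    }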

    Example code

    Mat srcImage,srcGrayImage;
    vector<Vec3f>circles;
    
    //minimum distance between circle centers
    const int g_houghcircleMinDistMax = 39;
    int g_houghcircleMinDistValue;
    void onTrackBarMinDist(int pos,void* userData);
    
    //Canny high threshold
    const int g_houghcircleThresholdUpMax = 254;
    int g_houghcircleThresholdUpValue;
    void onTrackBarThresholdUp(int pos,void* userData);
    
    //circle center accumulator threshold
    const int g_houghcircleCenterThresholdMax = 254;
    int g_houghcircleCenterThresholdValue;
    void onTrackBarCenterThreshold(int pos,void* userData);
    
    //minimum radius
    const int g_houghcircleMinRadiusMax = 200;
    int g_houghcircleMinRadiusValue;
    void onTrackBarRadiusMin(int pos,void* userData);
    
    //maximum radius
    const int g_houghcircleMaxRadiusMax = 1000;
    int g_houghcircleMaxRadiusValue;
    void onTrackBarRadiusMax(int pos,void* userData);
    
    
    
    int main(int argc,char* argv[])
    {
       srcImage = imread("F:\\opencv\\OpenCVImage\\HoughCircle.jpg");
       if(srcImage.channels() == 3)
       {
           cvtColor(srcImage, srcGrayImage, CV_RGB2GRAY);
       }
       else
       {
           srcGrayImage = srcImage.clone();
       }
       
       namedWindow("src gray image");
       namedWindow("dst image");
       
       g_houghcircleMinDistValue = 9;
       g_houghcircleThresholdUpValue = 199;
       g_houghcircleCenterThresholdValue = 99;
       g_houghcircleMinRadiusValue = 39;
       g_houghcircleMaxRadiusValue = 399;
       createTrackbar("center min dist", "dst image", &g_houghcircleMinDistValue, g_houghcircleMinDistMax,onTrackBarMinDist,0);
       createTrackbar("canny threshold", "dst image", &g_houghcircleThresholdUpValue, g_houghcircleThresholdUpMax,onTrackBarThresholdUp,0);
       createTrackbar("center threshold", "dst image", &g_houghcircleCenterThresholdValue, g_houghcircleCenterThresholdMax,onTrackBarCenterThreshold,0);
       createTrackbar("min radius", "dst image", &g_houghcircleMinRadiusValue, g_houghcircleMinRadiusMax,onTrackBarRadiusMin,0);
       createTrackbar("max radius", "dst image", &g_houghcircleMaxRadiusValue, g_houghcircleMaxRadiusMax,onTrackBarRadiusMax,0);
       onTrackBarRadiusMin(g_houghcircleMinDistValue,0);
       
       imshow("src gray image", srcGrayImage);
       
       moveWindow("src gray image", 0, 0);
       moveWindow("dst image", srcGrayImage.cols, 0);
       
       waitKey(0);
       return 0;
    }
    
    void onTrackBarMinDist(int pos,void* userData)
    {
       int minDist = g_houghcircleMinDistValue+1;
       double cannyThreshold = (double)g_houghcircleThresholdUpValue+1;
       double centerThreshold = (double)g_houghcircleCenterThresholdValue+1;
       int minRadius = g_houghcircleMinRadiusValue+1;
       int maxRadius = g_houghcircleMaxRadiusValue+1;
       if(minRadius >= maxRadius)
       {
           imshow("dst image", srcGrayImage);
       }
       else
       {
           Mat dstImage(srcGrayImage.rows,srcGrayImage.cols,CV_8UC3,Scalar(0,0,0));
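            // dp = 1 (accumulator has the same resolution as the image), then the minimum
            // center distance, the Canny high threshold, the center accumulator threshold,
            // and the allowed radius range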
           HoughCircles(srcGrayImage, circles, HOUGH_GRADIENT, 1, minDist,cannyThreshold,centerThreshold,minRadius,maxRadius);
           for (size_t i = 0; i < circles.size(); i++) {
               circle(dstImage, Point(circles[i][0],circles[i][1]), circles[i][2], Scalar(0,0,255));
           }
           imshow("dst image", dstImage);
       }
    }
    void onTrackBarThresholdUp(int pos,void* userData)
    {
       int minDist = g_houghcircleMinDistValue+1;
       double cannyThreshold = (double)g_houghcircleThresholdUpValue+1;
       double centerThreshold = (double)g_houghcircleCenterThresholdValue+1;
       int minRadius = g_houghcircleMinRadiusValue+1;
       int maxRadius = g_houghcircleMaxRadiusValue+1;
       if(minRadius >= maxRadius)
       {
           imshow("dst image", srcGrayImage);
       }
       else
       {
           Mat dstImage(srcGrayImage.rows,srcGrayImage.cols,CV_8UC3,Scalar(0,0,0));
           HoughCircles(srcGrayImage, circles, HOUGH_GRADIENT, 1, minDist,cannyThreshold,centerThreshold,minRadius,maxRadius);
           for (size_t i = 0; i < circles.size(); i++) {
                circle(dstImage, Point(circles[i][0],circles[i][1]), circles[i][2], Scalar(0,0,255),1,LINE_AA);
           }
           imshow("dst image", dstImage);
       }
    }
    void onTrackBarCenterThreshold(int pos,void* userData)
    {
       int minDist = g_houghcircleMinDistValue+1;
       double cannyThreshold = (double)g_houghcircleThresholdUpValue+1;
       double centerThreshold = (double)g_houghcircleCenterThresholdValue+1;
       int minRadius = g_houghcircleMinRadiusValue+1;
       int maxRadius = g_houghcircleMaxRadiusValue+1;
       if(minRadius >= maxRadius)
       {
           imshow("dst image", srcGrayImage);
       }
       else
       {
           Mat dstImage(srcGrayImage.rows,srcGrayImage.cols,CV_8UC3,Scalar(0,0,0));
           HoughCircles(srcGrayImage, circles, HOUGH_GRADIENT, 1, minDist,cannyThreshold,centerThreshold,minRadius,maxRadius);
           for (size_t i = 0; i < circles.size(); i++) {
               circle(dstImage, Point(circles[i][0],circles[i][1]), circles[i][2], Scalar(0,0,255));
           }
           imshow("dst image", dstImage);
       }
    }
    void onTrackBarRadiusMax(int pos,void* userData)
    {
       int minDist = g_houghcircleMinDistValue+1;
       double cannyThreshold = (double)g_houghcircleThresholdUpValue+1;
       double centerThreshold = (double)g_houghcircleCenterThresholdValue+1;
       int minRadius = g_houghcircleMinRadiusValue+1;
       int maxRadius = g_houghcircleMaxRadiusValue+1;
       if(minRadius >= maxRadius)
       {
           imshow("dst image", srcGrayImage);
       }
       else
       {
           Mat dstImage(srcGrayImage.rows,srcGrayImage.cols,CV_8UC3,Scalar(0,0,0));
           HoughCircles(srcGrayImage, circles, HOUGH_GRADIENT, 1, minDist,cannyThreshold,centerThreshold,minRadius,maxRadius);
           for (size_t i = 0; i < circles.size(); i++) {
               circle(dstImage, Point(circles[i][0],circles[i][1]), circles[i][2], Scalar(0,0,255));
           }
           imshow("dst image", dstImage);
       }
    }
    void onTrackBarRadiusMin(int pos,void* userData)
    {
       int minDist = g_houghcircleMinDistValue+1;
       double cannyThreshold = (double)g_houghcircleThresholdUpValue+1;
       double centerThreshold = (double)g_houghcircleCenterThresholdValue+1;
       int minRadius = g_houghcircleMinRadiusValue+1;
       int maxRadius = g_houghcircleMaxRadiusValue+1;
       if(minRadius >= maxRadius)
       {
           imshow("dst image", srcGrayImage);
       }
       else
       {
           Mat dstImage(srcGrayImage.rows,srcGrayImage.cols,CV_8UC3,Scalar(0,0,0));
           HoughCircles(srcGrayImage, circles, HOUGH_GRADIENT, 1, minDist,cannyThreshold,centerThreshold,minRadius,maxRadius);
           for (size_t i = 0; i < circles.size(); i++) {
               circle(dstImage, Point(circles[i][0],circles[i][1]), circles[i][2], Scalar(0,0,255));
           }
           imshow("dst image", dstImage);
       }
    }

    4. Remapping

             Remapping copies the pixel at a specified position in the source image to a given position in the destination image: for every destination pixel, dst(x, y) = src(mapx(x, y), mapy(x, y)).

             API: void remap(source image, destination image, map array 1, map array 2, int interpolation method, int border mode, border value)

             Note: the map arrays must be single-channel floating-point images (CV_32FC1), or the packed CV_32FC2 / CV_16SC2 forms; the source may be single- or multi-channel, and the destination gets the same size as the maps and the same type as the source.

    Example code

    int main(int argc,char* argv[])
    {
       Mat srcImage,mapx,mapy,dstImage;
       srcImage = imread("F:\\opencv\\OpenCVImage\\remap.jpg");
       mapx.create(srcImage.size(), CV_32FC1);
       mapy.create(srcImage.size(), CV_32FC1);
       dstImage.create(srcImage.rows, srcImage.cols, srcImage.type());
    
       //the core is building mapx and mapy: x is kept unchanged and the row index is flipped, giving a vertical flip
       for(int j = 0; j < srcImage.rows;j++)
       {
           for (int i = 0; i < srcImage.cols; i++)
           {
               mapx.at<float>(j,i) = static_cast<float>(i);
               mapy.at<float>(j,i) = static_cast<float>(srcImage.rows-1-j);
           }
       }
       remap(srcImage, dstImage, mapx, mapy, INTER_LINEAR,BORDER_CONSTANT,Scalar(0,0,0));
       imshow("src image", srcImage);
       imshow("dst image", dstImage);
       
       moveWindow("src image", 0, 0);
       moveWindow("dst image", srcImage.cols, 0);
       
       waitKey(0);
       return 0;
    }

    5. Affine Transformation

             In geometry, an affine transformation maps one vector space to another by applying a linear transformation followed by a translation. Any affine transformation can be written as multiplication by a matrix plus the addition of a vector.

             Affine transformations can scale, rotate and translate an image; what they represent is a mapping between two images.
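
             To make the matrix-plus-vector form concrete, here is a small sketch (the file path, scale factor and offsets are arbitrary example values of my own) that builds a 2x3 affine matrix [A | b] by hand, combining a uniform scale with a translation, and applies it with warpAffine:

    // Sketch: an affine transform written explicitly as a 2x3 matrix [A | b].
    // The path, scale and offsets below are arbitrary illustration values.
    #include <opencv2/opencv.hpp>
    using namespace cv;
    
    int main()
    {
       Mat src = imread("input.jpg");                 // hypothetical input image
       // [ 0.5   0   100 ]   ->  x' = 0.5*x + 100
       // [  0   0.5   50 ]   ->  y' = 0.5*y + 50
       Mat M = (Mat_<double>(2,3) << 0.5, 0.0, 100.0,
                                     0.0, 0.5,  50.0);
       Mat dst;
       warpAffine(src, dst, M, src.size());           // default INTER_LINEAR interpolation
       imwrite("output.jpg", dst);
       return 0;
    }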

             API: void warpAffine(source image, destination image, transformation matrix, Size output image size, int interpolation method, int border mode, constant border value)

             Note: the source may be single- or multi-channel, and the destination has the same type as the source. The transformation matrix is a 2x3 matrix that is usually obtained from getRotationMatrix2D rather than built by hand. The default interpolation method is INTER_LINEAR, similar to resize, but two extra flags are available: CV_WARP_FILL_OUTLIERS fills all pixels of the output image, and CV_WARP_INVERSE_MAP indicates that the matrix describes the inverse transform, from the output image back to the input image.

             API: Mat getRotationMatrix2D(Point2f rotation center in the source image, double rotation angle, double scale factor)

             Note: the matrix returned by this API can be used directly as the transformation matrix for warpAffine.

    Example code

    Mat srcImage,dstImage,rotationImage;
    const int g_angelMax = 360;
    int g_angelValue;
    void onTrackBarRotationAngel(int pos,void* userData);
    
    const int g_scaleMax = 10;//scale divisor, up to 10 (the callbacks shrink by 1.0/g_scaleValue)
    int g_scaleValue;
    void onTrackBarScale(int pos,void* userData);
    
    int main(int argc,char* argv[])
    {
       srcImage = imread("F:\\opencv\\OpenCVImage\\warpAffine.jpg");
       namedWindow("src image");
       namedWindow("dst image");
       
       g_angelValue = 0;
       g_scaleValue = 1;
       createTrackbar("angel value", "dst image", &g_angelValue, g_angelMax,onTrackBarRotationAngel,0);
       createTrackbar("scale value", "dst image", &g_scaleValue, g_scaleMax,onTrackBarScale,0);
       onTrackBarRotationAngel(g_angelValue,0);
       
       imshow("src image", srcImage);
       
       moveWindow("src image", 0, 0);
       moveWindow("dst image", srcImage.cols, 0);
       
       waitKey(0);
       return 0;
    }
    
    void onTrackBarRotationAngel(int pos,void* userData)
    {
       if(g_scaleValue == 0)g_scaleValue = 1;
       double scale = 1.0/g_scaleValue;
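       // rotate around the image center by g_angelValue degrees while scaling by 1/g_scaleValue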
       rotationImage = getRotationMatrix2D(Point(srcImage.cols/2,srcImage.rows/2), g_angelValue, scale);
       warpAffine(srcImage, dstImage, rotationImage, srcImage.size(),INTER_LINEAR);
       imshow("dst image", dstImage);
    }
    
    void onTrackBarScale(int pos,void* userData)
    {
       if(g_scaleValue == 0)g_scaleValue = 1;
       double scale = 1.0/g_scaleValue;
       rotationImage = getRotationMatrix2D(Point(srcImage.cols/2,srcImage.rows/2), g_angelValue, scale);
       warpAffine(srcImage, dstImage, rotationImage, srcImage.size(),INTER_LINEAR);
       imshow("dst image", dstImage);
    }

    6. Histogram Equalization

             When the dynamic range of an image needs to be expanded, the most commonly used technique is histogram equalization, an important application of grayscale transformations. It uses an algorithm to make the histogram roughly flat, stretching the distribution of pixel intensities to enhance the image's contrast. For example, if the gray levels of an image are concentrated in the 100-200 range, spreading that 100-200 range out over 0-255 is histogram equalization.
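
             As a rough sketch of what such an algorithm does (the textbook cumulative-distribution mapping, written here for illustration; it is not necessarily OpenCV's exact implementation), each gray level is remapped through the normalized cumulative histogram:

    // Sketch: histogram equalization of an 8-bit grayscale image via the CDF mapping.
    // Simplified illustration only; in practice cv::equalizeHist should be used.
    #include <opencv2/opencv.hpp>
    #include <vector>
    using namespace cv;
    
    Mat equalizeSketch(const Mat& gray)                        // expects CV_8UC1
    {
       std::vector<int> hist(256, 0);
       for(int y = 0; y < gray.rows; y++)
           for(int x = 0; x < gray.cols; x++)
               hist[gray.at<uchar>(y,x)]++;                    // build the gray-level histogram
    
       std::vector<uchar> lut(256, 0);
       int total = gray.rows*gray.cols, cdf = 0;
       for(int v = 0; v < 256; v++)
       {
           cdf += hist[v];                                     // cumulative count up to level v
           lut[v] = saturate_cast<uchar>(255.0*cdf/total);     // map the level through the CDF
       }
    
       Mat dst(gray.size(), CV_8UC1);
       for(int y = 0; y < gray.rows; y++)
           for(int x = 0; x < gray.cols; x++)
               dst.at<uchar>(y,x) = lut[gray.at<uchar>(y,x)];  // apply the lookup table
       return dst;
    }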

             After equalization the distribution is only approximately uniform. The dynamic range is expanded, but in essence the quantization step is enlarged while the number of quantization levels decreases; this is a drawback, and equalization may introduce false contours or erase genuine contours of the original image.

             For a washed-out, low-contrast image, equalization merges some gray levels and thereby increases contrast.

             For an image whose contrast is already high, equalization blends the gray levels together and reduces contrast.

             In terms of tone, equalization generally makes the image more expressive.

             API: void equalizeHist(source image, destination image)

             Both the source and the destination must be 8-bit single-channel images.

    Example code

    int main(int argc,char* argv[])
    {
       Mat srcImage,dstImage;
       srcImage = imread("F:\\opencv\\OpenCVImage\\equalizeHist.jpg");
       if(srcImage.channels() != 1)
       {
           cvtColor(srcImage, srcImage, CV_RGB2GRAY);
       }
       
       equalizeHist(srcImage, dstImage);
       
       //Mat dstRgbImage(srcImage.rows,srcImage.cols,CV_8UC3);
       //cvtColor(dstImage, dstRgbImage, CV_GRAY2BGR);
       
       imshow("src image", srcImage);
       imshow("dst image", dstImage);
       moveWindow("src image", 0, 0);
       moveWindow("dst image", srcImage.cols, 0);
       waitKey(0);
       return 0;
    }