It's been a while since I last posted about OpenCV. Recently I came up with an idea for recognizing map outlines, so I wrote it up to try out. (The goal is to recognize the map outlines of 28 Chinese provinces; municipalities are not considered.)
My basic idea is to enclose the map outline with a minimal bounding rectangle. The shape can then be judged by the rectangle's aspect ratio, or the image can be scaled to a fixed size and the number of pixels on the outline counted (sketched below).
Drawback: when the picture is read from a camera, this method introduces some error.
An ANN could also be trained to do the recognition, but that would be less efficient.
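As a rough illustration of the second option, here is a minimal sketch (my own, not part of the final program) that scales the binarized outline to a fixed 200x200 canvas and counts the outline pixels; the canvas size and file name are assumptions for illustration only:

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    // Assumed input: a binarized image with the map outline drawn white on black
    Mat outline = imread("map_outline.jpg", 0);        // 0 = load as grayscale
    Mat normalized;
    // nearest-neighbor interpolation keeps the image binary while scaling to a fixed size
    resize(outline, normalized, Size(200, 200), 0, 0, INTER_NEAREST);
    int pixelCount = countNonZero(normalized);         // pixels on the outline
    cout << "outline pixels after normalization: " << pixelCount << endl;
    return 0;
}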
step 1. Read the image and preprocess it
Mat src = imread("12.jpg");
Mat grayImage;
cvtColor(src, grayImage, CV_BGR2GRAY);
threshold(grayImage, grayImage, 48, 255, CV_THRESH_BINARY);
imshow("grayImage", grayImage);
A problem appeared right away: the processed grayImage would not display at all; the window only showed a solid gray image.
It finally turned out to be carelessness: the program was missing cvWaitKey(0); at the end, so the window never got a chance to refresh and the image could not be displayed.
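For completeness, here is step 1 again with the missing call added (same placeholder file name as above):

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat src = imread("12.jpg");
    Mat grayImage;
    cvtColor(src, grayImage, CV_BGR2GRAY);
    threshold(grayImage, grayImage, 48, 255, CV_THRESH_BINARY);
    imshow("grayImage", grayImage);
    cvWaitKey(0);   // without this the window never refreshes and stays gray
    return 0;
}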
step 2. Find the contours and draw them
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    Mat src = imread("timg.jpg");
    Mat grayImage, dstImage;
    src.copyTo(dstImage);

    int g_nStructElementSize = 1; // size of the structuring element (kernel matrix)
    // build the custom kernel
    Mat element = getStructuringElement(MORPH_RECT,
        Size(2 * g_nStructElementSize + 1, 2 * g_nStructElementSize + 1),
        Point(g_nStructElementSize, g_nStructElementSize));
    erode(src, src, element);

    cvtColor(src, grayImage, CV_BGR2GRAY);
    threshold(grayImage, grayImage, 48, 255, CV_THRESH_BINARY);
    imshow("2dst", grayImage);

    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;
    findContours(grayImage, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    vector<vector<Point>>::iterator It;
    for (It = contours.begin(); It != contours.end(); It++) {
        // draw the minimal upright rectangle enclosing each contour
        Rect rect = boundingRect(*It);
        Point2f vertex[4];
        vertex[0] = rect.tl();                                          // top-left corner
        vertex[1] = Point2f((float)rect.tl().x, (float)rect.br().y);    // bottom-left corner
        vertex[2] = rect.br();                                          // bottom-right corner
        vertex[3] = Point2f((float)rect.br().x, (float)rect.tl().y);    // top-right corner
        for (int j = 0; j < 4; j++)
            line(dstImage, vertex[j], vertex[(j + 1) % 4], Scalar(0, 0, 255), 1);
    }

    imshow("dst", dstImage);
    cvWaitKey(0);
    return 0;
}
The result: no contours were found at all. It turned out the threshold parameters were set incorrectly: findContours treats white (non-zero) pixels as the objects to trace, and the map is drawn with dark lines on a white background, so the threshold has to be inverted. The parameters should be:
threshold(grayImage, grayImage,48,255, THRESH_BINARY_INV);
Also, because the map's border lines are very thin, the image should be eroded before binarization (the lines are dark on a white background, so erosion thickens them):
int g_nStructElementSize = 1; // size of the structuring element (kernel matrix)
// build the custom kernel
Mat element = getStructuringElement(MORPH_RECT,
    Size(2 * g_nStructElementSize + 1, 2 * g_nStructElementSize + 1),
    Point(g_nStructElementSize, g_nStructElementSize));
erode(src, src, element);
step 3. After collecting the map data, test with the data of 10 provinces
Through continued experimentation, I found two values that work as features of a map: the aspect ratio of the contour's bounding rectangle, and the ratio of the contour's area to the total number of pixels in the image.
The final code is as follows:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>
using namespace cv;
using namespace std;

Mat result;
const double cha = 0.02; // acceptable error tolerance

bool compare(double a, double b)
{
    if (fabs(a - b) < cha) {
        return true;
    }
    return false;
}

bool result_output(double rate1, double rate2)
{
    if (compare(rate1, (double)172 / 96) && compare(rate2, 0.171524)) { cout << "Shaanxi Province" << endl; return true; }
    if (compare(rate1, (double)172 / 143) && compare(rate2, 0.270173)) { cout << "Anhui Province" << endl; return true; }
    if (compare(rate1, (double)154 / 123) && compare(rate2, 0.230148)) { cout << "Fujian Province" << endl; return true; }
    if (compare(rate1, (double)170 / 190) && compare(rate2, 0.132584)) { cout << "Gansu Province" << endl; return true; }
    if (compare(rate1, (double)155 / 208) && compare(rate2, 0.200146)) { cout << "Guangdong Province" << endl; return true; }
    if (compare(rate1, (double)129 / 180) && compare(rate2, 0.22718)) { cout << "Guangxi Zhuang Autonomous Region" << endl; return true; }
    if (compare(rate1, (double)118 / 145) && compare(rate2, 0.219451)) { cout << "Guizhou Province" << endl; return true; }
    if (compare(rate1, (double)77 / 96) && compare(rate2, 0.196616)) { cout << "Hainan Province" << endl; return true; }
    if (compare(rate1, (double)162 / 119) && compare(rate2, 0.247134)) { cout << "Hebei Province" << endl; return true; }
    if (compare(rate1, (double)125 / 135) && compare(rate2, 0.176323)) { cout << "Henan Province" << endl; return true; }
    cout << "Not recognized" << endl;
    return false;
}

int main()
{
    Mat src = imread("1.jpg");
    Mat grayImage, dstImage;
    src.copyTo(dstImage);

    int g_nStructElementSize = 1; // size of the structuring element (kernel matrix)
    // build the custom kernel
    Mat element = getStructuringElement(MORPH_RECT,
        Size(2 * g_nStructElementSize + 1, 2 * g_nStructElementSize + 1),
        Point(g_nStructElementSize, g_nStructElementSize));
    erode(src, src, element);

    cvtColor(src, grayImage, CV_BGR2GRAY);
    blur(grayImage, grayImage, Size(3, 3));
    threshold(grayImage, grayImage, 48, 255, THRESH_BINARY_INV);
    grayImage.copyTo(result);

    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(grayImage, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
    drawContours(dstImage, contours, -1, Scalar(255, 255, 255));

    vector<Point> point = contours[0];
    Rect rect = boundingRect(point);
    Point2f vertex[4];
    vertex[0] = rect.tl();                                          // top-left corner
    vertex[1] = Point2f((float)rect.tl().x, (float)rect.br().y);    // bottom-left corner
    vertex[2] = rect.br();                                          // bottom-right corner
    vertex[3] = Point2f((float)rect.br().x, (float)rect.tl().y);    // top-right corner
    for (int j = 0; j < 4; j++)
        line(dstImage, vertex[j], vertex[(j + 1) % 4], Scalar(0, 0, 255), 1);

    int h = rect.height, w = rect.width;
    double rate = (double)h / w;
    cout << "height: " << h << endl;
    cout << "width: " << w << endl;
    cout << "h / w: " << rate << endl;

    double area = contourArea(point, false);
    double sum = grayImage.cols * grayImage.rows;
    cout << "area: " << area << endl;
    cout << "area ratio: " << area / sum << endl;

    imshow("show", dstImage);
    result_output(rate, area / sum);
    cvWaitKey(0);
    return 0;
}
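As a side note on structure (my own suggestion, not part of the original program): the chain of if statements in result_output can be replaced with a small lookup table, which makes it easier to add the remaining provinces later. A minimal sketch, assuming the same compare tolerance as above:

struct MapFeature {
    const char* name;
    double rate1;   // bounding-box height / width
    double rate2;   // contour area / image pixel count
};

const MapFeature features[] = {
    { "Shaanxi Province", (double)172 / 96,  0.171524 },
    { "Anhui Province",   (double)172 / 143, 0.270173 },
    // ... remaining provinces go here
};

bool result_output(double rate1, double rate2)
{
    for (size_t i = 0; i < sizeof(features) / sizeof(features[0]); i++) {
        if (compare(rate1, features[i].rate1) && compare(rate2, features[i].rate2)) {
            cout << features[i].name << endl;
            return true;
        }
    }
    cout << "Not recognized" << endl;
    return false;
}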
Finally, one more issue: since the map will be detected through a camera, the picture may be tilted by some angle, so Rect should be replaced with RotatedRect.
RotatedRect rect = minAreaRect(point);
Point2f vertex[4];
rect.points(vertex);
for (int j = 0; j < 4; j++)
    line(dstImage, vertex[j], vertex[(j + 1) % 4], Scalar(0, 0, 255), 1);
int h = rect.size.height, w = rect.size.width;
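One caveat (my own note, not from the original post): minAreaRect reports size.width and size.height relative to the box's angle, so they do not necessarily correspond to the horizontal and vertical extents the way boundingRect's do. One way to sidestep this is to make the ratio orientation-independent, for example:

double h = rect.size.height, w = rect.size.width;
// orientation-independent aspect ratio: longer side divided by shorter side
double rate = (h > w) ? h / w : w / h;

If this convention is used, the reference ratios recorded for each province would of course have to be normalized the same way.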