cv::getRotationMatrix2D(center, angle_deg, scale) // a positive angle is counterclockwise as seen on the image, clockwise in standard xy coordinates
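A minimal usage sketch (file name and angle are placeholders): build the matrix around the image center and apply it with cv::warpAffine.

cv::Mat src = cv::imread("test.jpg");
cv::Point2f center(src.cols / 2.0f, src.rows / 2.0f);
cv::Mat M = cv::getRotationMatrix2D(center, 30.0, 1.0); // rotate 30 deg, no scaling
cv::Mat dst;
cv::warpAffine(src, dst, M, src.size()); // dst shows src rotated counterclockwise on screen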
cv::String cv::format( const char* fmt, ... );
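A trivial usage sketch, e.g. building an indexed file name:

int idx = 7;
cv::String name = cv::format("frame_%04d.png", idx); // "frame_0007.png"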
When extracting an ROI with cv::Rect, the rectangle must not exceed the image bounds:
cv::Mat image = cv::imread("xxx。jpg"); cv::Rect roi(-100,200, 500,500); // x< 0 cv::imshow("image(roi)", image(roi)); // image(roi)报错
cv::Mat image = cv::imread("test.jpg"); cv::Rect roi(100,200, image.rows+100, image.cols); // x< 0 cv::Mat img_show = image(roi); // image(roi)报错 cv::imshow("image(roi)", img_show);
Example of the rectangle & and | operators in OpenCV:
// Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height);
cv::Rect r1(0, 0, 200, 100);
cv::Rect r2(250, 50, 200, 100);
cv::Rect rect_overlap = r1 & r2; // intersection
cv::Rect rect_sum = r1 | r2;     // minimum enclosing rectangle
std::cout << "r1 = " << r1 << std::endl;
std::cout << "r2 = " << r2 << std::endl;
std::cout << "rect_overlap = " << rect_overlap << std::endl;
std::cout << "rect_sum = " << rect_sum << std::endl;
Output:
r1 = [200 x 100 from (0, 0)]
r2 = [200 x 100 from (250, 50)]
rect_overlap = [0 x 0 from (0, 0)]
rect_sum = [450 x 150 from (0, 0)]
Benchmarking cv::solvePnP: the minimum loop-body time is < 0.4 ms.
const cv::Mat intrinsic = cv::Mat::eye(3, 3, CV_64F);    // camera matrix (placeholder values)
const cv::Mat distortion = cv::Mat::zeros(4, 1, CV_64F); // distortion coefficients (placeholder)
for (int k = 0; k < 100; k++) {
    const int64 time_start = cv::getTickCount();
    cv::Mat rvec, tvec;
    std::vector<cv::Point2f> image_points;
    std::vector<cv::Point3f> object_points;
    object_points.push_back(cv::Point3f(0.0f, 0.0f, 0.0f));
    object_points.push_back(cv::Point3f(1.0f, 0.0f, 0.0f));
    object_points.push_back(cv::Point3f(1.0f, 1.0f, 0.0f));
    object_points.push_back(cv::Point3f(0.0f, 1.0f, 0.0f));
    image_points.push_back(cv::Point2f(0.0f, 0.0f));
    image_points.push_back(cv::Point2f(3.0f, 0.0f));
    image_points.push_back(cv::Point2f(2.0f, 2.0f));
    image_points.push_back(cv::Point2f(0.0f, 3.0f));
    cv::solvePnP(
        object_points, // 3-d points in object coordinates
        image_points,  // 2-d points in image coordinates
        intrinsic,     // camera matrix
        distortion,    // distortion coefficients
        rvec,          // output rotation *vector*
        tvec           // output translation vector
    );
    printf("time span = %f secs\n", (cv::getTickCount() - time_start) / cv::getTickFrequency());
}
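solvePnP returns the rotation as a Rodrigues vector; a short sketch (rvec/tvec as produced inside the loop above) of converting it to a 3x3 matrix:

cv::Mat R;
cv::Rodrigues(rvec, R); // 3x3 rotation matrix from the rotation vector
// A world point X maps into the camera frame as R*X + tvec.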
OpenCV multi-object tracking example (requires the opencv_contrib tracking module; note that in recent OpenCV 4.x, MultiTracker and the Rect2d-based tracker API moved to the cv::legacy namespace):
#include<tracking.hpp> #include<highgui.hpp> #include<video.hpp> #include<coreutility.hpp> #include<vector> using namespace cv; using namespace std; int main() { Mat frame; VideoCapture cap("1.mp4");//输入待处理的视频 cap >> frame; vector<Rect> rois; selectROIs("rois", frame, rois, false); // GUI操作框选ROIs if (rois.size()<1) return 0; MultiTracker trackers; vector<Rect2d> obj; vector<Ptr<Tracker>> algorithms; for (auto i = 0; i < rois.size(); i++) { obj.push_back(rois[i]); algorithms.push_back(TrackerKCF::create()); } trackers.add(algorithms, frame, obj); while (cap.read(frame)) { bool ok = trackers.update(frame); if (ok) { for (auto j = 0; j < trackers.getObjects().size(); j++) { rectangle(frame, trackers.getObjects()[j], Scalar(255, 0, 0), 2, 1); } imshow("tracker", frame); } if (waitKey(1) == 27)break; } return 0; }
cv::Point supports direct multiplication by a scalar:
cv::Point p(100,200);
p = p*0.3; // result: p = (30, 60)
cv::Point2f ptf(0.3f, 0.7f);
cv::Point pti;
pti = ptf; // the conversion rounds to the nearest integer internally
std::cout << "cv::Point2f: " << ptf.x << ", " << ptf.y << std::endl;
std::cout << "cv::Point: " << pti.x << ", " << pti.y << std::endl;
cv::contourArea(contour) // contour.size() must be > 0, otherwise an assertion fails
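A minimal guard sketch (binary is an assumed CV_8UC1 image):

std::vector<std::vector<cv::Point>> contours;
cv::findContours(binary, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
for (const auto& c : contours)
    if (!c.empty()) // avoid the empty-contour assert
        printf("area = %f\n", cv::contourArea(c));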
cv::selectROI()  // interactively select a single rect
cv::selectROIs() // interactively select multiple rects
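A minimal sketch (frame is an assumed loaded image); drag with the mouse and confirm with SPACE or ENTER:

cv::Rect box = cv::selectROI("select", frame);
if (box.area() > 0)
    cv::imshow("roi", frame(box));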
The following code tests OpenCV's DFT. Notes:
- Putting the real part in channel [0] and the imaginary part in channel [1] runs fine, and so does the reverse; to avoid confusion, always use real -> [0], imaginary -> [1].
- This example only exercises a single row; computing on a single column gives equally correct results.
const float fb = 5.1f;      // signal frequency
const float T = 1.0f;       // total duration
const float fs = 32.0f;     // sampling rate
const float Ts = 1.0f / fs; // sampling interval
const int real_id = 0, imag_id = 1;
cv::Mat x = cv::Mat::zeros(1, (int)std::round(T * fs), CV_32FC2);
cv::Mat F = x.clone();
for (int k = 0; k < x.cols; k++) {
    const float t = k * Ts;
    x.at<cv::Vec2f>(0, k)[real_id] = std::cos(2 * CV_PI * fb * t);
}
cv::dft(x, F, cv::DFT_SCALE);
for (int k = 0; k < x.cols; k++) {
    printf("x(%03d) = %+08.3f + %+08.3f*i\n", k,
           x.at<cv::Vec2f>(0, k)[real_id], x.at<cv::Vec2f>(0, k)[imag_id]);
}
for (int i = 0; i < F.cols; i++) {
    const int k = (i + (F.cols / 2)) % F.cols;
    const float f = ((float)k / T < fs / 2) ? ((float)k / T) : ((float)k / T - fs);
    const float real_part = F.at<cv::Vec2f>(0, k)[real_id];
    const float imag_part = F.at<cv::Vec2f>(0, k)[imag_id];
    printf("%03d# F(%+08.3f) = %+08.3f + %+08.3f*i = %+08.3f @%+08.3f deg\n", k, f,
           real_part, imag_part, cv::norm(F.at<cv::Vec2f>(0, k)),
           180.0 / CV_PI * atan2(imag_part, real_part)); // radians -> degrees
}
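A round-trip sanity check sketch, reusing x and F from above; since the forward transform used DFT_SCALE, the plain inverse already reproduces the input:

cv::Mat x_rec;
cv::dft(F, x_rec, cv::DFT_INVERSE); // no DFT_SCALE here: the forward pass already divided by N
std::cout << "max reconstruction error = " << cv::norm(x, x_rec, cv::NORM_INF) << std::endl;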
The most concise way to read a parameter file is shown below. Because the file is opened and released twice, it is slower than loading it once; the load-once approach, however, is not as concise (see the sketch after these two lines).
cv::FileStorage("file_name.yml", cv::FileStorage::READ)["parameter_1"] >> parameter_1;
cv::FileStorage("file_name.yml", cv::FileStorage::READ)["parameter_2"] >> parameter_2;
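The load-once variant mentioned above might look like this (same hypothetical file and parameter names):

cv::FileStorage fs("file_name.yml", cv::FileStorage::READ);
fs["parameter_1"] >> parameter_1;
fs["parameter_2"] >> parameter_2;
fs.release(); // the file is opened and parsed only once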
When reading an XML file with cv::FileStorage, the root element must be <opencv_storage>, otherwise an error is raised.
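A minimal valid file might look like this (parameter_1 as a hypothetical entry):

<?xml version="1.0"?>
<opencv_storage>
<parameter_1>42</parameter_1>
</opencv_storage>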
cv::fillConvexPoly(image_to_fill, one_contour, cv::Scalar(255), cv::LINE_AA); // fills the region enclosed by one_contour; the result is written into image_to_fill
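A typical mask-building sketch (img and one_contour assumed defined):

cv::Mat mask = cv::Mat::zeros(img.size(), CV_8UC1);
cv::fillConvexPoly(mask, one_contour, cv::Scalar(255), cv::LINE_AA);
cv::Mat masked;
img.copyTo(masked, mask); // keep only the filled region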
cv::resize(img_src, img_linear, cv::Size(13, 13), 0, 0, cv::INTER_LINEAR); // bilinear interpolation
cv::resize(img_src, img_area, cv::Size(13, 13), 0, 0, cv::INTER_AREA);     // area average (preferred for shrinking)
OpenCV saturates automatically when evaluating + - * /. In the code below, if dat is CV_8UC1, any pixel whose difference would be < 0 is clamped to 0, so there is no need to worry about underflow.
cv::Mat dat;
// .... other code
dat = dat - cv::mean(dat);
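The same saturation logic is exposed directly via cv::saturate_cast:

uchar a = cv::saturate_cast<uchar>(-5);  // 0   (clamped at the low end)
uchar b = cv::saturate_cast<uchar>(300); // 255 (clamped at the high end)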