/**
 * colorReduce - reduce color number
 *
 * @param image - the image for processing
 * @param div   - reduce factor
 */
void colorReduce(cv::Mat &image, int div=64) {
    int nl = image.rows;                    // number of lines
    int nc = image.cols * image.channels(); // number of elements per line
    for (int j=0; j<nl; ++j) {
        uchar *data = image.ptr<uchar>(j);
        for (int i=0; i<nc; ++i) {
            // process each pixel
            data[i] = data[i] / div * div + div / 2;
            // end of pixel processing
        }
    }
}
/**
 * colorReduce - reduce color number
 *
 * @param image - the image for processing
 * @param div   - reduce factor
 */
void colorReduce(cv::Mat &image, int div=64) {
    int nl = image.rows;                    // number of lines
    int nc = image.cols * image.channels();
    if (image.isContinuous()) {
        // then no padded pixels
        nc = nc * nl;
        nl = 1; // it is now a 1D array
    }
    // this loop is executed only once when the data is continuous
    for (int j=0; j<nl; ++j) {
        uchar *data = image.ptr<uchar>(j);
        for (int i=0; i<nc; ++i) {
            // process each pixel
            data[i] = data[i] / div * div + div / 2;
            // end of pixel processing
        }
    }
}
/**
 * colorReduce - reduce color number
 *
 * @param image - the image for processing
 * @param div   - reduce factor
 */
void colorReduce(cv::Mat &image, int div=64) {
    cv::MatIterator_<cv::Vec3b> it = image.begin<cv::Vec3b>();
    cv::MatIterator_<cv::Vec3b> itend = image.end<cv::Vec3b>();
    // loop over all pixels
    for ( ; it != itend; ++it) {
        (*it)[0] = (*it)[0] / div * div + div / 2;
        (*it)[1] = (*it)[1] / div * div + div / 2;
        (*it)[2] = (*it)[2] / div * div + div / 2;
    }
}
cv::Mat's iterators are random-access iterators, so they support full iterator arithmetic and can be used with STL algorithms such as std::sort().
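As a quick illustration, a minimal sketch (assuming gray is a single-channel 8-bit cv::Mat) that sorts all pixel values in place:

// because cv::Mat iterators are random-access,
// std::sort can reorder all pixel values in place
std::sort(gray.begin<uchar>(), gray.end<uchar>());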
// define image ROI
cv::Mat imageROI;
imageROI = image(cv::Rect(385, 270, logo.cols, logo.rows));
// add logo to image
cv::addWeighted(imageROI, 1.0, logo, 0.3, 0., imageROI);
double threshold(InputArray src,   // input
                 OutputArray dst,  // output
                 double thresh,    // threshold value
                 double maxval,    // maximum value to use
                 int type)         // thresholding type
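A minimal usage sketch (assuming image is a grayscale cv::Mat; the threshold value 60 is an example value):

cv::Mat thresholded;
cv::threshold(image,        // input image
              thresholded,  // output binary image
              60,           // threshold value (example value)
              255,          // value assigned to pixels above the threshold
              cv::THRESH_BINARY); // thresholding type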
void morphologyEx(InputArray src,
                  OutputArray dst,
                  int op,
                  InputArray kernel,
                  Point anchor=Point(-1,-1),
                  int iterations=1,
                  int borderType=BORDER_CONSTANT,
                  const Scalar& borderValue=morphologyDefaultBorderValue())
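A minimal usage sketch (assuming image is a grayscale cv::Mat), performing a morphological closing with a 5x5 structuring element:

cv::Mat element5(5, 5, CV_8U, cv::Scalar(1)); // 5x5 structuring element
cv::Mat closed;
cv::morphologyEx(image, closed, cv::MORPH_CLOSE, element5);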
void calcHist(const Mat* images,      // source arrays
              int nimages,            // number of source images
              const int* channels,    // list of the dims channels
              InputArray mask,        // optional mask
              OutputArray hist,       // output histogram
              int dims,               // histogram dimensionality
              const int* histSize,    // array of histogram sizes
              const float** ranges,   // array of the dims arrays
                                      // of the histogram bin boundaries
              bool uniform=true,      // is uniform or not
              bool accumulate=false)  // accumulation flag
// For gray-level images
class Histogram1D {
public:
    Histogram1D() {
        // Prepare arguments for 1D histogram
        histSize[0] = 256;
        hranges[0] = 0.0;
        hranges[1] = 255.0;
        ranges[0] = hranges;
        channels[0] = 0; // by default, we look at channel 0
    }

    // Computes the 1D histogram.
    cv::MatND getHistogram(const cv::Mat &image) {
        cv::MatND hist;
        // Compute histogram
        cv::calcHist(&image,
                     1,          // histogram from 1 image only
                     channels,   // the channel used
                     cv::Mat(),  // no mask is used
                     hist,       // the resulting histogram
                     1,          // it is a 1D histogram
                     histSize,   // number of bins
                     ranges);    // pixel value range
        return hist;
    }

    // Computes the 1D histogram and returns an image of it.
    cv::Mat getHistogramImage(const cv::Mat &image) {
        // Compute histogram first
        cv::MatND hist = getHistogram(image);
        // Get min and max bin values
        double maxVal = 0;
        double minVal = 0;
        cv::minMaxLoc(hist, &minVal, &maxVal, 0, 0);
        // Image on which to display histogram
        cv::Mat histImg(histSize[0], histSize[0], CV_8U, cv::Scalar(255));
        // set highest point at 90% of nbins
        int hpt = static_cast<int>(0.9 * histSize[0]);
        // Draw a vertical line for each bin
        for (int h = 0; h < histSize[0]; ++h) {
            float binVal = hist.at<float>(h);
            int intensity = static_cast<int>(binVal * hpt / maxVal);
            // This function draws a line between 2 points
            cv::line(histImg,
                     cv::Point(h, histSize[0]),
                     cv::Point(h, histSize[0] - intensity),
                     cv::Scalar::all(0));
        }
        return histImg;
    }

private:
    int histSize[1];        // number of bins
    float hranges[2];       // min and max pixel value
    const float* ranges[1];
    int channels[1];        // only 1 channel used here
};
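A minimal usage sketch of the class above (assuming a grayscale image loaded into image):

Histogram1D h;
cv::MatND histo = h.getHistogram(image);         // raw 256-bin histogram
cv::Mat histImage = h.getHistogramImage(image);  // histogram rendered as an image
cv::imshow("Histogram", histImage);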
// For color BGR images
class ColorHistogram {
public:
    ColorHistogram() {
        // Prepare arguments for color histogram
        histSize[0] = histSize[1] = histSize[2] = 256;
        hranges[0] = 0.0;   // BGR range
        hranges[1] = 255.0; // all channels have the same range
        ranges[0] = hranges;
        ranges[1] = hranges;
        ranges[2] = hranges;
        channels[0] = 0; // the three channels: B, G, R
        channels[1] = 1;
        channels[2] = 2;
    }

    // Computes the 3D histogram.
    cv::MatND getHistogram(const cv::Mat &image) {
        cv::MatND hist;
        // Compute histogram
        cv::calcHist(&image,
                     1,          // histogram from 1 image only
                     channels,   // the channels used
                     cv::Mat(),  // no mask is used
                     hist,       // the resulting histogram
                     3,          // it is a 3D color histogram
                     histSize,   // number of bins
                     ranges);    // pixel value ranges
        return hist;
    }

    // Computes the sparse color histogram.
    cv::SparseMat getSparseHistogram(const cv::Mat &image) {
        cv::SparseMat hist(3, histSize, CV_32F);
        // Compute histogram
        cv::calcHist(&image,
                     1,          // histogram from 1 image only
                     channels,   // the channels used
                     cv::Mat(),  // no mask is used
                     hist,       // the resulting histogram
                     3,          // it is a 3D color histogram
                     histSize,   // number of bins
                     ranges);    // pixel value ranges
        return hist;
    }

private:
    int histSize[3];        // number of bins
    float hranges[2];       // min and max pixel value
    const float* ranges[3];
    int channels[3];        // 3 channels used here
};
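A minimal usage sketch (assuming a BGR image loaded into color); the sparse representation avoids allocating the full 256x256x256 dense array:

ColorHistogram ch;
cv::SparseMat sparse = ch.getSparseHistogram(color);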
void calcBackProject(const Mat* images,       // source arrays
                     int nimages,             // number of source images
                     const int* channels,     // the list of channels
                     InputArray hist,         // input histogram
                     OutputArray backProject, // destination back projection array
                     const float** ranges,    // array of arrays of the histogram
                                              // bin boundaries
                     double scale=1,          // scale
                     bool uniform=true)       // is uniform or not
For example, to detect the cloud-like region in the image above, we can first use an ROI to extract the part of the image containing the cloud as the reference image:
cv::Mat imageROI;
imageROI = image(cv::Rect(360, 55, 40, 50)); // Cloud region
cv::calcBackProject(&image, 1, // one image
                    channels,  // the channels used
                    histogram, // the histogram we are backprojecting
                    result,    // the resulting back projection image
                    ranges,    // the range of values, for each dimension
                    255.0);    // a scaling factor
void blur(InputArray src,            // input
          OutputArray dst,           // output
          Size ksize,                // size of the square kernel
          Point anchor=Point(-1,-1), // anchor point
          int borderType=BORDER_DEFAULT)
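A minimal usage sketch (assuming image is a cv::Mat); each output pixel becomes the mean of its 5x5 neighbourhood:

cv::Mat blurred;
cv::blur(image, blurred, cv::Size(5, 5));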
void Sobel(InputArray src,   // input
           OutputArray dst,  // output
           int ddepth,       // image type
           int dx, int dy,   // kernel specification
           int ksize=3,      // size of the square kernel
           double scale=1,   // scale
           double delta=0,   // offset
           int borderType=BORDER_DEFAULT)
\[ \angle\,\mbox{grad}(f) = \arctan\left( -\frac{\partial f}{\partial y} \Big/ \frac{\partial f}{\partial x} \right) \]
OpenCV provides the cv::cartToPolar() function to obtain the gradient magnitude and direction:
// Sobel must be computed in floating point
cv::Sobel(image, sobelX, CV_32F, 1, 0);
cv::Sobel(image, sobelY, CV_32F, 0, 1);
// Compute the L2 norm and direction of the gradient
cv::Mat norm, dir;
cv::cartToPolar(sobelX, sobelY, norm, dir);
class LaplacianZC {
private:
    // original image
    cv::Mat img;
    // 32-bit float image containing the Laplacian
    cv::Mat laplace;
    // Aperture size of the Laplacian kernel
    int aperture;

public:
    LaplacianZC() : aperture(3) {}

    // Set the aperture size of the kernel
    void setAperture(int a) {
        aperture = a;
    }

    // Compute the floating point Laplacian
    cv::Mat computeLaplacian(const cv::Mat &image) {
        // Compute Laplacian
        cv::Laplacian(image, laplace, CV_32F, aperture);
        // Keep local copy of the image
        // (used for zero-crossings)
        img = image.clone();
        return laplace;
    }

    // Get the Laplacian result in an 8-bit image.
    // Zero corresponds to gray level 128.
    // If no scale is provided, the max value is scaled to intensity 255.
    // You must call computeLaplacian before calling this.
    cv::Mat getLaplacianImage(double scale=-1.0) {
        if (scale < 0) {
            double lapmin, lapmax;
            cv::minMaxLoc(laplace, &lapmin, &lapmax);
            scale = 127 / std::max(-lapmin, lapmax);
        }
        cv::Mat laplaceImage;
        laplace.convertTo(laplaceImage, CV_8U, scale, 128);
        return laplaceImage;
    }
};
Usage example:
// Compute Laplacian using the LaplacianZC class
LaplacianZC laplacian;
laplacian.setAperture(7);
cv::Mat flap = laplacian.computeLaplacian(image);
cv::Mat laplace = laplacian.getLaplacianImage();
// Get a binary image of the zero-crossings.
// If the product of the two adjacent pixels is
// less than threshold, then this zero-crossing
// will be ignored.
cv::Mat getZeroCrossings(float threshold=1.0) {
    // Iterators over the Laplacian image:
    // "it" starts on the second row so that *(it-1) and *itup
    // point to the left and upper neighbours of the current pixel
    cv::Mat_<float>::const_iterator it = laplace.begin<float>() + laplace.step1();
    cv::Mat_<float>::const_iterator itend = laplace.end<float>();
    cv::Mat_<float>::const_iterator itup = laplace.begin<float>();
    // Binary image initialized to white
    cv::Mat binary(laplace.size(), CV_8U, cv::Scalar(255));
    cv::Mat_<uchar>::iterator itout = binary.begin<uchar>() + binary.step1();
    // negate the input threshold value
    threshold *= -1.0;
    for ( ; it != itend; ++it, ++itup, ++itout) {
        // if the product of two adjacent pixels is
        // negative then there is a sign change
        if (*it * *(it-1) < threshold)
            *itout = 0; // horizontal zero-crossing
        else if (*it * *itup < threshold)
            *itout = 0; // vertical zero-crossing
    }
    return binary;
}
// Open image
image = cv::imread("../group.jpg");
// define bounding rectangle
// the pixels outside this rectangle
// will be labeled as background
cv::Rect rectangle(10, 100, 380, 180);
Then the cv::grabCut() function can be called:
cv::Mat result;           // segmentation result (4 possible values)
cv::Mat bgModel, fgModel; // the models (internally used)
// GrabCut segmentation
cv::grabCut(image,             // input image
            result,            // segmentation result
            rectangle,         // rectangle containing the foreground
            bgModel, fgModel,  // models
            5,                 // number of iterations
            cv::GC_INIT_WITH_RECT); // use the rectangle
Each pixel of the resulting result image will hold one of the following four constant values:
cv::GC_BGD - pixels that definitely belong to the background (actual value 0);
cv::GC_FGD - pixels that definitely belong to the foreground (actual value 1);
cv::GC_PR_BGD - pixels that probably belong to the background (actual value 2);
cv::GC_PR_FGD - pixels that probably belong to the foreground (actual value 3).
We can then extract all pixels that are probably foreground:
// Get the pixels marked as likely foreground
cv::compare(result, cv::GC_PR_FGD, result, cv::CMP_EQ);
// Generate output image
cv::Mat foreground(image.size(), CV_8UC3, cv::Scalar(255, 255, 255));
image.copyTo(foreground, // bg pixels are not copied
             result);
void HoughLines(InputArray image,   // 8-bit, single-channel binary source image
                OutputArray lines,  // output vector of lines
                double rho,         // distance resolution of the accumulator in pixels
                double theta,       // angle resolution of the accumulator in radians
                int threshold,      // accumulator threshold parameter
                double srn=0,       // a divisor for rho
                double stn=0)       // a divisor for theta
// Apply Canny algorithm
cv::Mat contours;
cv::Canny(image, contours, 125, 350);
// Hough transform for line detection
std::vector<cv::Vec2f> lines;
cv::HoughLines(contours, lines,
               1, PI/180, // step size
               80);       // minimum number of votes
// Draw the detected lines
std::vector<cv::Vec2f>::const_iterator it = lines.begin();
while (it != lines.end()) {
    float rho = (*it)[0];   // first element is distance rho
    float theta = (*it)[1]; // second element is angle theta
    if (theta < PI/4. || theta > 3.*PI/4.) { // ~vertical line
        // point of intersection of the line with first row
        cv::Point pt1(rho/cos(theta), 0);
        // point of intersection of the line with last row
        cv::Point pt2((rho - image.rows*sin(theta))/cos(theta), image.rows);
        // draw a white line
        cv::line(image, pt1, pt2, cv::Scalar(255), 1);
    } else { // ~horizontal line
        // point of intersection of the line with first column
        cv::Point pt1(0, rho/sin(theta));
        // point of intersection of the line with last column
        cv::Point pt2(image.cols, (rho - image.cols*cos(theta))/sin(theta));
        // draw a white line
        cv::line(image, pt1, pt2, cv::Scalar(255), 1);
    }
    ++it;
}
void HoughLinesP(InputArray image,       // 8-bit, single-channel binary source image
                 OutputArray lines,      // output vector of lines
                 double rho,             // distance resolution of the accumulator in pixels
                 double theta,           // angle resolution of the accumulator in radians
                 int threshold,          // accumulator threshold parameter
                 double minLineLength=0, // minimum line length
                 double maxLineGap=0)    // maximum allowed gap
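A minimal usage sketch (assuming contours is the binary edge map produced by cv::Canny above and PI is defined as 3.1415926; the threshold, length, and gap values are example values). Each detected segment is returned directly as its two endpoints (x1, y1, x2, y2):

std::vector<cv::Vec4i> segments;
cv::HoughLinesP(contours, segments,
                1, PI/180, // step size
                80,        // minimum number of votes
                100,       // minimum segment length (example value)
                20);       // maximum allowed gap (example value)
// draw each detected segment
for (size_t i = 0; i < segments.size(); ++i) {
    cv::line(image,
             cv::Point(segments[i][0], segments[i][1]),
             cv::Point(segments[i][2], segments[i][3]),
             cv::Scalar(255), 1);
}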
void HoughCircles(InputArray image,    // 8-bit, single-channel, grayscale input image
                  OutputArray circles, // output vector of found circles
                  int method,          // detection method to use
                  double dp,           // inverse ratio of the accumulator resolution
                  double minDist,      // minimum distance between two circles
                  double param1=100,   // Canny high threshold
                  double param2=100,   // minimum number of votes (accumulator threshold)
                  int minRadius=0,     // minimum circle radius
                  int maxRadius=0)     // maximum circle radius
// Smooth the image to reduce noise
cv::GaussianBlur(image, image, cv::Size(5,5), 1.5);
std::vector<cv::Vec3f> circles;
// Detect circles
cv::HoughCircles(image, circles, CV_HOUGH_GRADIENT,
                 2,        // accumulator resolution (size of the image / 2)
                 50,       // minimum distance between two circles
                 200,      // Canny high threshold
                 100,      // minimum number of votes
                 25, 100); // min and max radius
// Draw the circles
std::vector<cv::Vec3f>::const_iterator itc = circles.begin();
while (itc != circles.end()) {
    cv::circle(image,
               cv::Point((*itc)[0], (*itc)[1]), // circle centre
               (*itc)[2],       // circle radius
               cv::Scalar(255), // color
               2);              // thickness
    ++itc;
}
Result:
Shape fitting
Lines
OpenCV provides the cv::fitLine() function to fit a line to a set of points:
void fitLine(InputArray points, // input vector of 2D or 3D points
             OutputArray line,  // output line parameters
             int distType,      // distance type
             double param,      // numerical parameter for some distance types
             double reps,       // sufficient accuracy for the radius
             double aeps)       // sufficient accuracy for the angle
Example:
cv::Vec4f line;
cv::fitLine(cv::Mat(points), line,
            CV_DIST_L2,  // distance type
            0,           // not used with L2 distance
            0.01, 0.01); // accuracy
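The output vector holds (vx, vy, x0, y0), where (vx, vy) is a unit vector along the line and (x0, y0) is a point on it. A sketch of drawing a short segment of the fitted line on the source image (the 100-pixel length is an arbitrary choice):

int x0 = static_cast<int>(line[2]);            // a point on the line
int y0 = static_cast<int>(line[3]);
int x1 = static_cast<int>(x0 + 100 * line[0]); // a second point, 100 pixels away
int y1 = static_cast<int>(y0 + 100 * line[1]); // along the line direction
cv::line(image, cv::Point(x0, y0), cv::Point(x1, y1), cv::Scalar(255), 2);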
void findContours(InputOutputArray image,       // source, an 8-bit single-channel image
                  OutputArrayOfArrays contours, // detected contours
                  OutputArray hierarchy,        // optional output hierarchy vector
                  int mode,                     // contour retrieval mode
                  int method,                   // contour approximation method
                  Point offset=Point())         // optional point offset
Example (extracting only the external contours, ignoring the inner ones):
// Find contours
std::vector<std::vector<cv::Point>> contours;
cv::findContours(image,
                 contours,              // a vector of contours
                 CV_RETR_EXTERNAL,      // retrieve the external contours
                 CV_CHAIN_APPROX_NONE); // all pixels of each contour
// Draw black contours on a white image
cv::Mat result(image.size(), CV_8U, cv::Scalar(255));
cv::drawContours(result, contours,
                 -1,             // draw all contours
                 cv::Scalar(0),  // in black
                 2);             // with a thickness of 2
// testing the bounding box
cv::Rect r0 = cv::boundingRect(cv::Mat(contours[0]));
cv::rectangle(result, r0, cv::Scalar(0), 2);
Minimum enclosing circle
// testing the enclosing circle
float radius;
cv::Point2f center;
cv::minEnclosingCircle(cv::Mat(contours[1]), center, radius);
cv::circle(result, cv::Point(center),
           static_cast<int>(radius), cv::Scalar(0), 2);
Approximate polygon
// testing the approximate polygon
std::vector<cv::Point> poly;
cv::approxPolyDP(cv::Mat(contours[2]), poly,
                 5,     // accuracy of the approximation
                 true); // yes it is a closed shape
// Iterate over each segment and draw it
std::vector<cv::Point>::const_iterator itp = poly.begin();
while (itp != (poly.end() - 1)) {
    cv::line(result, *itp, *(itp + 1), cv::Scalar(0), 2);
    ++itp;
}
// last point linked to first point
cv::line(result,
         *(poly.begin()),
         *(poly.end() - 1), cv::Scalar(20), 2);
Convex hull
// testing the convex hull
std::vector<cv::Point> hull;
cv::convexHull(cv::Mat(contours[3]), hull);
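The hull is returned as an ordered list of vertices, so it can be drawn as a closed polygon in the same way as the approximate polygon above (a sketch, drawing on the result image):

std::vector<cv::Point>::const_iterator ith = hull.begin();
while (ith != (hull.end() - 1)) {
    cv::line(result, *ith, *(ith + 1), cv::Scalar(0), 2);
    ++ith;
}
// close the polygon: last point linked to first point
cv::line(result, *(hull.begin()), *(hull.end() - 1), cv::Scalar(0), 2);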
Moments
// testing the moments
// iterate over all contours
std::vector<std::vector<cv::Point>>::const_iterator itc = contours.begin();
while (itc != contours.end()) {
    // compute all moments
    cv::Moments mom = cv::moments(cv::Mat(*itc++));
    // draw mass center
    cv::circle(result,
               // position of mass center converted to integer
               cv::Point(mom.m10/mom.m00, mom.m01/mom.m00),
               2, cv::Scalar(0), 2); // draw black dot
}