int main()
{
    // Open the video file
    cv::VideoCapture capture("../stomp.avi");
    // Check whether the video was successfully opened
    if (!capture.isOpened())
        return 1;

    // Get the frame rate
    double rate = capture.get(CV_CAP_PROP_FPS);

    bool stop(false);
    cv::Mat frame; // current video frame
    cv::namedWindow("Extracted Frame");

    // Delay between each frame in ms,
    // corresponding to the video frame rate
    int delay = 1000 / rate;

    // for all frames in the video
    while (!stop) {
        // read the next frame if any
        if (!capture.read(frame))
            break;
        cv::imshow("Extracted Frame", frame);

        // introduce a delay, or press a key to stop
        if (cv::waitKey(delay) >= 0)
            stop = true;
    }

    // Close the video file
    // (not required since it is called by the destructor)
    capture.release();
}
Video captured by a camera can be read in the same way; the only change needed is to replace the video file name above with the camera ID. The default camera ID is 0.
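The loop below also assumes that a capture device cap and a video writer oVideoWriter have already been constructed. A minimal sketch of that setup is shown here; the output file name, codec, and frame rate are illustrative assumptions (using namespace cv is assumed, as in the loop that follows):

// Open the default camera
VideoCapture cap(0);
// query the frame size so the writer matches the capture
double dWidth  = cap.get(CV_CAP_PROP_FRAME_WIDTH);
double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
// create the output video writer (file name, codec, and fps are assumptions)
VideoWriter oVideoWriter("MyVideo.avi",
                         CV_FOURCC('P', 'I', 'M', '1'), // MPEG-1 codec
                         20,                            // frame rate
                         Size((int)dWidth, (int)dHeight));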
while (1) {
    Mat frame;
    bool bSuccess = cap.read(frame); // read a new frame
    if (!bSuccess) { // if not successful, break the loop
        cout << "ERROR: cannot read a frame from video file" << endl;
        break;
    }
    oVideoWriter.write(frame); // write the frame to the output video
    imshow("MyVideo", frame);  // show the frame in the "MyVideo" window

    if (waitKey(10) == 27) { // wait for the ESC key
        cout << "ESC key is pressed by user" << endl;
        break;
    }
}
return 0;
}
// Computes the 1D hue histogram, optionally with a mask.
// The BGR source image is converted to HSV.
cv::MatND getHueHistogram(const cv::Mat &image, int minSaturation = 0)
{
    cv::MatND hist;

    // Convert to HSV color space
    cv::Mat hsv;
    cv::cvtColor(image, hsv, CV_BGR2HSV);

    // Mask to be used (or not)
    cv::Mat mask;

    if (minSaturation > 0) {
        // Split the 3 channels into 3 images
        std::vector<cv::Mat> v;
        cv::split(hsv, v);
        // Mask out the low-saturation pixels
        cv::threshold(v[1], mask, minSaturation, 255, cv::THRESH_BINARY);
    }

    // Prepare arguments for a 1D hue histogram
    // (class members in the original; declared locally here so the
    // function is self-contained)
    float hranges[2];
    const float *ranges[1] = { hranges };
    int channels[1];
    int histSize[1] = { 180 }; // one bin per hue value
    hranges[0] = 0.0;          // hue varies from 0
    hranges[1] = 180.0;        // to 180 in OpenCV
    channels[0] = 0;           // the hue channel

    // Compute histogram
    cv::calcHist(&hsv, 1,  // histogram of 1 image only
                 channels, // the channel used
                 mask,     // binary mask (empty if minSaturation == 0)
                 hist,     // the resulting histogram
                 1,        // it is a 1D histogram
                 histSize, // number of bins
                 ranges);  // pixel value range

    return hist;
}
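A usage sketch; the file name and the saturation threshold of 65 are illustrative assumptions:

cv::Mat image = cv::imread("image.jpg");        // hypothetical input image
cv::MatND hueHist = getHueHistogram(image, 65); // ignore low-saturation pixels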
void goodFeaturesToTrack(
    InputArray image,           // input 8-bit or floating-point 32-bit, single-channel image
    OutputArray corners,        // output vector of detected corners
    int maxCorners,             // maximum number of corners to return
    double qualityLevel,        // quality level
    double minDistance,         // minimum allowed distance between points
    InputArray mask = noArray(),// optional region of interest
    int blockSize = 3,          // size of an averaging block
    bool useHarrisDetector = false, // whether to use a classical Harris detector
    double k = 0.04);           // free parameter of the Harris detector
Example:
// Compute good features to track
std::vector<cv::Point2f> corners;
cv::goodFeaturesToTrack(image, corners,
                        500,  // maximum number of corners to return
                        0.01, // quality level
                        10);  // minimum allowed distance between points
// vector of keypoints
std::vector<cv::KeyPoint> keypoints;
// Construction of the Good Features to Track detector
cv::GoodFeaturesToTrackDetector gftt(
    500,  // maximum number of corners to be returned
    0.01, // quality level
    10);  // minimum allowed distance between points
// point detection using the FeatureDetector method
gftt.detect(image, keypoints);
cv::drawKeypoints(image,     // original image
                  keypoints, // vector of keypoints
                  image,     // the output image
                  cv::Scalar(255, 255, 255), // keypoint color
                  cv::DrawMatchesFlags::DRAW_OVER_OUTIMG); // drawing flag
These classes are all used in much the same way.
FAST feature points
The FAST feature detector was designed to speed up detection. Example usage:
// vector of keypoints
std::vector<cv::KeyPoint> keypoints;
// Construction of the FAST feature detector object
cv::FastFeatureDetector fast(
    40); // threshold for detection
// feature point detection
fast.detect(image, keypoints);
// Draw the keypoints
cv::drawKeypoints(image,     // original image
                  keypoints, // vector of keypoints
                  image,     // the output image
                  cv::Scalar(255, 255, 255), // keypoint color
                  cv::DrawMatchesFlags::DRAW_OVER_OUTIMG); // drawing flag
SURF feature points
SURF (Speeded Up Robust Features) feature points are scale-invariant and efficient to compute.
Example:
// vector of keypoints
std::vector<cv::KeyPoint> keypoints;
// Construct the SURF feature detector object
cv::SurfFeatureDetector surf(
    2500.); // threshold
// Detect the SURF features
surf.detect(image, keypoints);
// Draw the keypoints with scale and orientation information
cv::drawKeypoints(image,        // original image
                  keypoints,    // vector of keypoints
                  featureImage, // the resulting image
                  cv::Scalar(255, 255, 255), // color of the points
                  cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS); // flag
// Construction of the SURF descriptor extractor
cv::SurfDescriptorExtractor surfDesc;
// Extraction of the SURF descriptors
cv::Mat descriptors1;
surfDesc.compute(image1, keypoints1, descriptors1);
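The same extractor is then applied to the second image; a minimal sketch, assuming image2 and keypoints2 were obtained in the same way as above:

// Extraction of the SURF descriptors of the second image
cv::Mat descriptors2;
surfDesc.compute(image2, keypoints2, descriptors2);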
// Construction of the matcher
cv::BruteForceMatcher<cv::L2<float> > matcher;
// Match the descriptors of the two images
std::vector<cv::DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
std::nth_element(matches.begin(),      // initial position
                 matches.begin() + 24, // position of the sorted element
                 matches.end());       // end position
// remove all elements after the 25th
matches.erase(matches.begin() + 25, matches.end());
// Visualize the matches
cv::Mat imageMatches;
cv::drawMatches(image1, keypoints1, // 1st image and its keypoints
                image2, keypoints2, // 2nd image and its keypoints
                matches,            // the matches
                imageMatches,       // the image produced
                cv::Scalar(255, 255, 255)); // color of the lines
bool findChessboardCorners(
    InputArray image,    // source chessboard view
    Size patternSize,    // number of inner corners per chessboard row and column
    OutputArray corners, // output array of detected corners
    int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE);
// output vector of image points
std::vector<cv::Point2f> imageCorners;
// number of inner corners on the chessboard
cv::Size boardSize(6, 4);
// Get the chessboard corners
bool found = cv::findChessboardCorners(image, boardSize, imageCorners);
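If sub-pixel accuracy is required, the detected corners can then be refined with cv::cornerSubPix; a sketch, where the window size and termination criteria are illustrative assumptions:

// Refine the corner locations to sub-pixel accuracy
cv::Mat gray;
cv::cvtColor(image, gray, CV_BGR2GRAY); // cornerSubPix needs a single-channel image
cv::cornerSubPix(gray, imageCorners,
                 cv::Size(5, 5),   // half-size of the search window
                 cv::Size(-1, -1), // no dead zone
                 cv::TermCriteria(cv::TermCriteria::MAX_ITER +
                                  cv::TermCriteria::EPS,
                                  30,    // max number of iterations
                                  0.1)); // min accuracy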
Drawing the detected corners
OpenCV also provides a function to draw the detected corners on the chessboard image:
// Draw the corners
cv::drawChessboardCorners(image, boardSize,
                          imageCorners,
                          found); // whether the corners have been found
double calibrateCamera(
    InputArrayOfArrays objectPoints, // vector of vectors of calibration pattern points
    InputArrayOfArrays imagePoints,  // vector of vectors of projections of the pattern points
    Size imageSize,                  // size of the image
    InputOutputArray cameraMatrix,   // output 3x3 floating-point camera matrix
    InputOutputArray distCoeffs,     // output vector of distortion coefficients
    OutputArrayOfArrays rvecs,       // output vector of rotation vectors
    OutputArrayOfArrays tvecs,       // output vector of translation vectors
    int flags = 0,                   // flags
    TermCriteria criteria = TermCriteria( // termination criteria
        TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
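A minimal sketch of calling it directly; the point vectors are assumed to have been filled beforehand with corresponding 3D pattern points and their 2D projections, and the image size is an illustrative value:

std::vector<std::vector<cv::Point3f> > objectPoints; // 3D pattern points (assumed filled)
std::vector<std::vector<cv::Point2f> > imagePoints;  // their 2D projections (assumed filled)
cv::Mat cameraMatrix, distCoeffs;
std::vector<cv::Mat> rvecs, tvecs;
double rms = cv::calibrateCamera(objectPoints, imagePoints,
                                 cv::Size(640, 480), // image size (assumed)
                                 cameraMatrix, distCoeffs,
                                 rvecs, tvecs); // returns the reprojection error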
// input points
std::vector<std::vector<cv::Point3f> > objectPoints;
std::vector<std::vector<cv::Point2f> > imagePoints;
// output matrices
cv::Mat cameraMatrix;
cv::Mat distCoeffs;
// flag to specify how calibration is done
int flag;
// used in image undistortion
cv::Mat map1, map2;
bool mustInitUndistort;
// Open the chessboard images and extract the corner points
int addChessboardPoints(const std::vector<std::string> &filelist,
                        cv::Size &boardSize);
// Add scene points and corresponding image points
void addPoints(const std::vector<cv::Point2f> &imageCorners,
               const std::vector<cv::Point3f> &objectCorners);
// Calibrate the camera
double calibrate(cv::Size &imageSize);
// Set the calibration flag
void setCalibrationFlag(bool radial8CoeffEnabled = false,
                        bool tangentialParamEnabled = false);
// Remove distortion in an image (after calibration)
cv::Mat remap(const cv::Mat &image);
// Create the calibrator object
CameraCalibrator cameraCalibrator;
// add the corners from the chessboard images
cv::Size boardSize(6, 4);
cameraCalibrator.addChessboardPoints(
    filelist,   // filenames of the chessboard images
    boardSize); // size of the chessboard
// calibrate the camera
// cameraCalibrator.setCalibrationFlag(true, true);
cameraCalibrator.calibrate(image.size());
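After calibration, the remap method declared above can be used to correct lens distortion in an image; a minimal usage sketch:

// Remove distortion using the computed calibration parameters
cv::Mat undistorted = cameraCalibrator.remap(image);
cv::imshow("Undistorted Image", undistorted);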
As the figure shows, the image position x of a 3D point X in the left camera is found by drawing a line from X through the left camera's center. Conversely, the 3D point corresponding to an image point x must lie somewhere on that line. This means that in order to find the image x' of point x in the second camera, we have to find the projection of this line onto the second camera's image plane; this projected line is called the epipolar line of point x. Its orientation depends on the relative positions of the two cameras; in fact, the configuration of the epipolar lines is determined by the geometry of the whole stereo system.
Another observation is that all epipolar lines pass through a single point (e or e' in the figure). This point is the projection of one camera's center onto the other camera's image plane; this special point is called the epipole.
Mathematically, the relation between an image point and its corresponding epipolar line can be expressed by a 3x3 matrix, the fundamental matrix F:
\[\begin{bmatrix} l'_{1} \\ l'_{2} \\ l'_{3} \end{bmatrix} = F \begin{bmatrix} x \\ y \\ 1 \end{bmatrix} \]
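Here l'1, l'2, l'3 are the coefficients of the epipolar line l' in the second image. Equivalently, any pair of corresponding points x and x' satisfies the epipolar constraint (a standard property of the fundamental matrix):

\[ \mathbf{x}'^{\top} F \, \mathbf{x} = 0 \]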
Mat findFundamentalMat(
    InputArray points1,           // array of N points from the first image
    InputArray points2,           // array of N points from the second image
    int method = FM_RANSAC,       // method for computing the fundamental matrix
    double param1 = 3.,           // parameter used for RANSAC
    double param2 = 0.99,         // parameter used for RANSAC or LMedS methods only
    OutputArray mask = noArray());// output array of N elements
// Compute the F matrix from 7 matches
cv::Mat fundamental = cv::findFundamentalMat(
    cv::Mat(selPoints1), // points in the first image
    cv::Mat(selPoints2), // points in the second image
    CV_FM_7POINT);       // 7-point method
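With more matches available, the more robust RANSAC method can be used instead; a sketch, where the distance threshold and confidence level are illustrative values:

// Compute the F matrix with RANSAC, extracting the inliers as well
std::vector<uchar> inliers(selPoints1.size(), 0);
cv::Mat fundamentalRansac = cv::findFundamentalMat(
    cv::Mat(selPoints1), cv::Mat(selPoints2), // matching points
    CV_FM_RANSAC, // RANSAC method
    1.0,          // max distance to the epipolar line, in pixels (assumed)
    0.99,         // confidence level (assumed)
    inliers);     // inlier/outlier flag for each match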
// for the points of the left image, draw the corresponding
// epipolar lines in the right image
std::vector<cv::Vec3f> lines1;
cv::computeCorrespondEpilines(
    cv::Mat(selPoints1), // image points
    1,                   // in image 1 (can also be 2)
    fundamental,         // F matrix
    lines1);             // vector of epipolar lines
// for all epipolar lines
for (std::vector<cv::Vec3f>::const_iterator it = lines1.begin();
     it != lines1.end(); ++it) {
    // draw the line between the first and last columns
    cv::line(image2,
             cv::Point(0, -(*it)[2] / (*it)[1]),
             cv::Point(image2.cols,
                       -((*it)[2] + (*it)[0] * image2.cols) / (*it)[1]),
             cv::Scalar(255, 255, 255));
}
Note that the homography obtained by default maps points of the first image into the second; to warp the first image into the second image's viewpoint, its inverse is what is actually needed. For this reason, cv::warpPerspective() by default first inverts the homography it is given and then uses that inverse for the warping. If the matrix you pass in is already the inverse of the homography, specify the cv::WARP_INVERSE_MAP flag when calling the function.
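A minimal sketch of the warping call, assuming a homography mapping image1 into image2's viewpoint has been computed beforehand (e.g. with cv::findHomography):

// Warp image1 into the viewpoint of image2
cv::Mat result;
cv::warpPerspective(image1,     // input image
                    result,     // output (warped) image
                    homography, // homography (inverted internally by default)
                    cv::Size(2 * image1.cols, image1.rows)); // output size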
The other image can then be stitched together with this image, which is now at the same viewpoint:
// Copy image 2 onto the first half of the full (warped) image
cv::Mat half(result, cv::Rect(0, 0, image2.cols, image2.rows));
image2.copyTo(half); // copy image2 into the ROI of the warped image1