OpenCV - RobustMatcher using findHomography

I implemented a robust matcher, found on the internet, based on three different tests: a symmetry test, a ratio test and a RANSAC test. It works well. I then used findHomography to locate the object from the good matches.

Here is the code:

    RobustMatcher::RobustMatcher() : ratio(0.65f), refineF(true), confidence(0.99), distance(3.0) {
        detector = new cv::SurfFeatureDetector(400); // Better than ORB
        //detector = new cv::SiftFeatureDetector;    // Better than ORB
        //extractor = new cv::OrbDescriptorExtractor();
        //extractor = new cv::SiftDescriptorExtractor;
        extractor = new cv::SurfDescriptorExtractor;
        //matcher = new cv::FlannBasedMatcher;
        matcher = new cv::BFMatcher();
    }
    // Clears matches for which the NN ratio is greater than the threshold
    // and returns the number of removed points
    // (the corresponding entries are cleared,
    // i.e. their size becomes 0)
    int RobustMatcher::ratioTest(std::vector<std::vector<cv::DMatch> >& matches) {
      int removed=0;
        // for all matches
      for (std::vector<std::vector<cv::DMatch> >::iterator
               matchIterator= matches.begin();
           matchIterator!= matches.end(); ++matchIterator) {
             // if 2 NN has been identified
             if (matchIterator->size() > 1) {
                 // check distance ratio
                 if ((*matchIterator)[0].distance/
                     (*matchIterator)[1].distance > ratio) {
                    matchIterator->clear(); // remove match
                    removed++;
                 }
             } else { // does not have 2 neighbours
                 matchIterator->clear(); // remove match
                 removed++;
             }
      }
      return removed;
    }

    // Insert symmetrical matches in symMatches vector
    void RobustMatcher::symmetryTest(
        const std::vector<std::vector<cv::DMatch> >& matches1,
        const std::vector<std::vector<cv::DMatch> >& matches2,
        std::vector<cv::DMatch>& symMatches) {
      // for all matches image 1 -> image 2
      for (std::vector<std::vector<cv::DMatch> >::
               const_iterator matchIterator1= matches1.begin();
           matchIterator1!= matches1.end(); ++matchIterator1) {
         // ignore deleted matches
         if (matchIterator1->size() < 2)
             continue;
         // for all matches image 2 -> image 1
         for (std::vector<std::vector<cv::DMatch> >::
            const_iterator matchIterator2= matches2.begin();
             matchIterator2!= matches2.end();
             ++matchIterator2) {
             // ignore deleted matches
             if (matchIterator2->size() < 2)
                continue;
             // Match symmetry test
             if ((*matchIterator1)[0].queryIdx ==
                 (*matchIterator2)[0].trainIdx &&
                 (*matchIterator2)[0].queryIdx ==
                 (*matchIterator1)[0].trainIdx) {
                 // add symmetrical match
                   symMatches.push_back(
                     cv::DMatch((*matchIterator1)[0].queryIdx,
                               (*matchIterator1)[0].trainIdx,
                               (*matchIterator1)[0].distance));
                   break; // next match in image 1 -> image 2
             }
         }
      }
    }

    // Identify good matches using RANSAC
    // Returns the fundamental matrix
    cv::Mat RobustMatcher::ransacTest(const std::vector<cv::DMatch>& matches,
            const std::vector<cv::KeyPoint>& keypoints1,
            const std::vector<cv::KeyPoint>& keypoints2,
            std::vector<cv::DMatch>& outMatches) {
        // Convert keypoints into Point2f
        std::vector<cv::Point2f> points1, points2;
        cv::Mat fundemental;
        for (std::vector<cv::DMatch>::const_iterator it= matches.begin();
             it!= matches.end(); ++it) {
            // Get the position of left keypoints
            float x= keypoints1[it->queryIdx].pt.x;
            float y= keypoints1[it->queryIdx].pt.y;
            points1.push_back(cv::Point2f(x,y));
            // Get the position of right keypoints
            x= keypoints2[it->trainIdx].pt.x;
            y= keypoints2[it->trainIdx].pt.y;
            points2.push_back(cv::Point2f(x,y));
        }
        // Compute F matrix using RANSAC
        std::vector<uchar> inliers(points1.size(),0);
        if (points1.size()>0 && points2.size()>0){
            fundemental= cv::findFundamentalMat(
                        cv::Mat(points1),cv::Mat(points2), // matching points
                        inliers,       // match status (inlier or outlier)
                        CV_FM_RANSAC, // RANSAC method
                        distance,      // distance to epipolar line
                        confidence); // confidence probability
            // extract the surviving (inliers) matches
            std::vector<uchar>::const_iterator itIn= inliers.begin();
            std::vector<cv::DMatch>::const_iterator itM= matches.begin();
            // for all matches
            for ( ;itIn!= inliers.end(); ++itIn, ++itM) {
                if (*itIn) { // it is a valid match
                    outMatches.push_back(*itM);
                }
            }
            if (refineF) {
                // The F matrix will be recomputed with
                // all accepted matches
                // Convert keypoints into Point2f
                // for final F computation
                points1.clear();
                points2.clear();
                for (std::vector<cv::DMatch>::const_iterator it= outMatches.begin();
                     it!= outMatches.end(); ++it) {
                    // Get the position of left keypoints
                    float x= keypoints1[it->queryIdx].pt.x;
                    float y= keypoints1[it->queryIdx].pt.y;
                    points1.push_back(cv::Point2f(x,y));
                    // Get the position of right keypoints
                    x= keypoints2[it->trainIdx].pt.x;
                    y= keypoints2[it->trainIdx].pt.y;
                    points2.push_back(cv::Point2f(x,y));
                }
                // Compute 8-point F from all accepted matches
                if (points1.size()>0 && points2.size()>0){
                    fundemental= cv::findFundamentalMat(
                                cv::Mat(points1),cv::Mat(points2), // matches
                                CV_FM_8POINT); // 8-point method
                }
            }
        }
        return fundemental;
    }

    // Match feature points using symmetry test and RANSAC
    // Returns the fundamental matrix
    cv::Mat RobustMatcher::match(cv::Mat& image1,
        cv::Mat& image2, // input images
       // output matches and keypoints
       std::vector<cv::DMatch>& matches,
       std::vector<cv::KeyPoint>& keypoints1,
       std::vector<cv::KeyPoint>& keypoints2) {
     // clear previous matches, if any
     matches.clear();
     // 1a. Detection of the SURF features
     detector->detect(image1,keypoints1);
     detector->detect(image2,keypoints2);
     // 1b. Extraction of the SURF descriptors
     /*cv::Mat img_keypoints;
     cv::Mat img_keypoints2;
     drawKeypoints( image1, keypoints1, img_keypoints, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
     drawKeypoints( image2, keypoints2, img_keypoints2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
     //-- Show detected (drawn) keypoints
     //cv::imshow("Result keypoints detected", img_keypoints);
    // cv::imshow("Result keypoints detected", img_keypoints2);

     cv::waitKey(5000);*/

     cv::Mat descriptors1, descriptors2;
     extractor->compute(image1,keypoints1,descriptors1);
     extractor->compute(image2,keypoints2,descriptors2);
     // 2. Match the two image descriptors
     // Construction of the matcher
     //cv::BruteForceMatcher<cv::L2<float>> matcher;
     // from image 1 to image 2
     // based on k nearest neighbours (with k=2)
     std::vector<std::vector<cv::DMatch> > matches1;
     matcher->knnMatch(descriptors1,descriptors2,
         matches1, // vector of matches (up to 2 per entry)
         2);        // return 2 nearest neighbours
      // from image 2 to image 1
      // based on k nearest neighbours (with k=2)
      std::vector<std::vector<cv::DMatch> > matches2;
      matcher->knnMatch(descriptors2,descriptors1,
         matches2, // vector of matches (up to 2 per entry)
         2);        // return 2 nearest neighbours
      // 3. Remove matches for which NN ratio is
      // > than threshold
      // clean image 1 -> image 2 matches
      int removed= ratioTest(matches1);
      // clean image 2 -> image 1 matches
       removed += ratioTest(matches2);
      // 4. Remove non-symmetrical matches
      std::vector<cv::DMatch> symMatches;
      symmetryTest(matches1,matches2,symMatches);
      // 5. Validate matches using RANSAC
      cv::Mat fundemental= ransacTest(symMatches,
                  keypoints1, keypoints2, matches);
      // return the found fundamental matrix
      return fundemental;
    }
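
For completeness, this is roughly how I drive the matcher (a minimal sketch, assuming the RobustMatcher declaration is in scope and OpenCV 2.4 with the nonfree module for SURF; the image file names are placeholders):

    #include <iostream>
    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/nonfree/nonfree.hpp> // SURF/SIFT live here in OpenCV 2.4

    int main() {
        cv::initModule_nonfree(); // register SURF/SIFT before first use
        cv::Mat image1 = cv::imread("img1.png", CV_LOAD_IMAGE_GRAYSCALE);
        cv::Mat image2 = cv::imread("img2.png", CV_LOAD_IMAGE_GRAYSCALE);
        if (image1.empty() || image2.empty()) return 1;

        RobustMatcher rmatcher; // the class implemented above
        std::vector<cv::DMatch> matches;
        std::vector<cv::KeyPoint> keypoints1, keypoints2;
        cv::Mat F = rmatcher.match(image1, image2, matches, keypoints1, keypoints2);
        std::cout << "Robust matches: " << matches.size() << std::endl;
        return 0;
    }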




After the call to match(), I use the surviving matches with findHomography to locate the object (a fragment from my calling code):

            cv::Mat img_matches;

            drawMatches(image1, keypoints_img1,image2, keypoints_img2,
                                 matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
                    std::cout << "Number of good matches: " << (int)matches.size() << std::endl;

                    if ((int)matches.size() > 5 ){
                        Debug::info("Good matching !");
                    }
                    //-- Localize the object
                    std::vector<Point2f> obj;
                    std::vector<Point2f> scene;

                    for( size_t i = 0; i < matches.size(); i++ )
                    {
                      //-- Get the keypoints from the good matches
                      obj.push_back( keypoints_img1[ matches[i].queryIdx ].pt );
                      scene.push_back( keypoints_img2[matches[i].trainIdx ].pt );
                    }
                    cv::Mat arrayRansac;
                    std::vector<uchar> inliers(obj.size(),0);
                    Mat H = findHomography( obj, scene, CV_RANSAC,3,inliers);
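                    // Extra check I am considering (a sketch, not in my current code):
                    // the RANSAC mask from findHomography tells how many matches
                    // actually support H; a very small count would explain the
                    // degenerate, line-like outlines I sometimes get.
                    int supportCount = cv::countNonZero(inliers);
                    std::cout << "Matches supporting H: " << supportCount << std::endl;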


                    //-- Get the corners from the image_1 ( the object to be "detected" )
                    std::vector<Point2f> obj_corners(4);
                    obj_corners[0] = Point2f(0, 0);
                    obj_corners[1] = Point2f(image1.cols, 0);
                    obj_corners[2] = Point2f(image1.cols, image1.rows);
                    obj_corners[3] = Point2f(0, image1.rows);
                    std::vector<Point2f> scene_corners(4);


                    perspectiveTransform( obj_corners, scene_corners, H);


                    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
                    line( img_matches, scene_corners[0] + Point2f( image1.cols, 0), scene_corners[1] + Point2f( image1.cols, 0), Scalar(0, 255, 0), 4 );
                    line( img_matches, scene_corners[1] + Point2f( image1.cols, 0), scene_corners[2] + Point2f( image1.cols, 0), Scalar( 0, 255, 0), 4 );
                    line( img_matches, scene_corners[2] + Point2f( image1.cols, 0), scene_corners[3] + Point2f( image1.cols, 0), Scalar( 0, 255, 0), 4 );
                    line( img_matches, scene_corners[3] + Point2f( image1.cols, 0), scene_corners[0] + Point2f( image1.cols, 0), Scalar( 0, 255, 0), 4 );



    }


I get results like this (the homography is good):

But I don't understand why, for some of my results where the matching is good, I get this kind of result (the homography does not look right):

Can someone explain this to me? Maybe I have to tune the parameters? But if I relax the constraints (increase the ratio, for example), then instead of having no matches between two different pictures (which is what I want), I get many matches... which I don't want. And on top of that the homography still does not work (I just get a green line, as above).
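
One idea I am considering (a sketch of my own; the function name homographyLooksSane and the 1e-3 threshold are made up, and it assumes H and scene_corners as in the code above) is to reject homographies whose projected corners come out degenerate:

    #include <cmath>
    #include <vector>
    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp> // cv::isContourConvex

    // Heuristic sanity check: a perspective map of a rectangle should keep its
    // corners convex, and the upper-left 2x2 block of H should not be
    // near-singular. The 1e-3 threshold is a guess, not a tuned value.
    bool homographyLooksSane(const cv::Mat& H,
                             const std::vector<cv::Point2f>& scene_corners) {
        if (H.empty()) return false;
        double det = cv::determinant(H(cv::Rect(0, 0, 2, 2)));
        if (std::fabs(det) < 1e-3) return false;   // nearly degenerate mapping
        return cv::isContourConvex(scene_corners); // corners must stay convex
    }

If such a check failed, I would simply skip drawing the green outline instead of showing a collapsed quadrilateral.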

And conversely, my robust matcher also works (too) well in one direction: for identical pictures (just rotated, at different scales, etc.) it works fine, but when I have two merely similar images, I get no matches at all...

So I don't know how to get a good result in both cases. I am a beginner. The robust matcher works well on the same image, but for two similar images, like above, it does not, and that is a problem.

Maybe I'm on the wrong track.

Before posting this message, I of course read a lot on Stack Overflow, but I did not find the answer. (For example, here.)
