I need to loop over multiple images passed in as command-line arguments, matching an ROI against each of them. The code then has to warp each image with a homography so that it matches the rotation and scale of the ROI. After each image is mapped it needs to be saved into an image list to be altered later. I think the imagelist control is like a thumbnail window.

C++
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>       // warpPerspective
#include <opencv2/highgui/highgui.hpp>       // imread, imshow, waitKey
#include <opencv2/calib3d/calib3d.hpp>       // findHomography
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>    // SurfFeatureDetector, SurfDescriptorExtractor
#include <iostream>
#include <cstdio>
#include <cstdlib>                           // atoi
#include <conio.h>                           // _getch (Windows only)

using namespace cv;
using namespace std;

int main( int argc, char** argv ) {
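
// Expected arguments (as used below):
//   argv[1..4]  ROI corner coordinates: x1 y1 x2 y2
//   argv[5]     image the ROI is taken from
//   argv[6..]   images to match against the ROI and warp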

int roix1 = atoi( argv[1] );
int roiy1 = atoi( argv[2] );
int roix2 = atoi( argv[3] );
int roiy2 = atoi( argv[4] );
std::vector<Mat> imglist;
cv::Mat img_1 = cv::imread( argv[5], CV_LOAD_IMAGE_GRAYSCALE );
     Mat img_roi = img_1(Rect(roix1 ,roiy1, roix2-roix1, roiy2-roiy1));

for(int i = 6; i < argc; i++)
{
cv::Mat img_2 = cv::imread( argv[i], CV_LOAD_IMAGE_GRAYSCALE );
    //argv[i] is the argument at index i


  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_ROI, keypoints_Images;

    detector.detect( img_roi , keypoints_ROI );
    detector.detect( img_2, keypoints_Images );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_ROI, descriptors_Images;

    extractor.compute( img_roi, keypoints_ROI, descriptors_ROI );
    extractor.compute( img_2, keypoints_Images, descriptors_Images );

  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_ROI, descriptors_Images, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_ROI.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );

  //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_ROI.rows; i++ )
  { if( matches[i].distance < 3*min_dist )
    { good_matches.push_back( matches[i]); }
  }

  Mat img_matches;
  drawMatches( img_roi, keypoints_ROI, img_2, keypoints_Images,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );


  //-- Localize the object from img_1 in img_2
  std::vector<Point2f> obj;
  std::vector<Point2f> scene;

  for( size_t i = 0; i < good_matches.size(); i++ )
  {
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints_ROI[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_Images[ good_matches[i].trainIdx ].pt );
  }
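
  // Note (added): findHomography() needs at least 4 point pairs. With fewer
  // good matches the call below cannot estimate H, so skipping the image is
  // one option (an assumption on my part, not part of the original post).
  if( good_matches.size() < 4 )
  {
    printf( "-- Not enough good matches for %s, skipping\n", argv[i] );
    continue;
  }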

  Mat H = findHomography( obj, scene, RANSAC );

  //-- Get the corners from the ROI
  std::vector<Point2f> obj_corners(4);
  obj_corners[0] = Point(0,0); obj_corners[1] = Point( img_roi.cols, 0 );
  obj_corners[2] = Point( img_roi.cols, img_roi.rows ); obj_corners[3] = Point( 0, img_roi.rows );
  std::vector<Point2f> scene_corners(4);

  perspectiveTransform( obj_corners, scene_corners, H);

  Mat transformedImage;
  warpPerspective( img_2, transformedImage, H, Size(img_2.cols*2, img_2.rows*2) );

  // Store the warped image for later processing
  imglist.push_back( transformedImage );

  //-- Draw lines between the corners in Img 2
  Point2f offset( (float)img_roi.cols, 0);
  line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
  line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );

  //-- Show detected matches
  imshow( "Good Matches & Object detection", img_matches );

        // If 'ESC' is pressed, break out of the loop
        int c = waitKey(10);
        if( (char)c == 27 ) break;
}
    _getch();
    return 0;
}

1 solution

You are almost there!
Use the following code to extract the ROI.
C++
Mat img_roi = img_1(Rect(roix1 ,roiy1, roix2-roix1, roiy2-roiy1));
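
Since roix1 through roiy2 arrive as text on the command line, they also need to be converted to integers, and it can help to clamp the rectangle to the image before cutting the ROI. A minimal sketch (the atoi() parsing and the clamping are my additions, not from the posted code):
C++
int roix1 = atoi( argv[1] ), roiy1 = atoi( argv[2] );
int roix2 = atoi( argv[3] ), roiy2 = atoi( argv[4] );
// Intersect with the image rectangle so Mat::operator() cannot throw
Rect roiRect = Rect( roix1, roiy1, roix2 - roix1, roiy2 - roiy1 )
             & Rect( 0, 0, img_1.cols, img_1.rows );
Mat img_roi = img_1( roiRect );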

The following code creates a vector to hold the list of transformed images.
C++
std::vector<Mat> imglist;
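
Since it is an ordinary std::vector, the warped images can be walked after the loop to alter or save them. Just as an illustration (the output file names here are made up):
C++
for( size_t k = 0; k < imglist.size(); k++ )
{
    char name[64];
    sprintf( name, "warped_%02d.png", (int)k );  // hypothetical output name
    imwrite( name, imglist[k] );                 // or apply further processing here
}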

Then change the following two lines
C++
detector.detect( img_1, keypoints_ROI );
detector.detect( img_scene, keypoints_Images );

to
C++
detector.detect( img_roi , keypoints_ROI );
detector.detect( img_2, keypoints_Images );

Then change the following lines
C++
extractor.compute( img_object, keypoints_ROI, descriptors_ROI );
extractor.compute( img_2, keypoints_Images, descriptors_Images );

to
C++
extractor.compute( img_roi, keypoints_ROI, descriptors_ROI );
extractor.compute( img_2, keypoints_Images, descriptors_Images );

After finding the homography, warp the image according to the perspective transformation matrix using the following code.
C++
Mat transformedImage;
warpPerspective(img_2,transformedImage,H,Size(img_2.cols*2,img_2.rows*2));

The size of the transformed image should be somewhat large, otherwise the warped result may be cropped; one way to estimate a safe size is sketched after the last snippet below.
Put the transformed image in the list using the following code.
C++
imglist.push_back( transformedImage );
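
If you would rather not guess at the "times two" factor, one option (a sketch of my own, not from the snippet above) is to map the corners of img_2 through H with perspectiveTransform() and size the output from their bounding box. This still assumes the mapped corners land at non-negative coordinates; if they can go negative, an extra translation would be needed as well.
C++
// See where the four corners of img_2 end up under H
std::vector<Point2f> img_corners(4), dst_corners(4);
img_corners[0] = Point2f( 0, 0 );
img_corners[1] = Point2f( (float)img_2.cols, 0 );
img_corners[2] = Point2f( (float)img_2.cols, (float)img_2.rows );
img_corners[3] = Point2f( 0, (float)img_2.rows );
perspectiveTransform( img_corners, dst_corners, H );

// The bounding box of the mapped corners gives an output size that will not crop
Rect bounds = boundingRect( dst_corners );
Mat transformedImage;
warpPerspective( img_2, transformedImage, H, bounds.size() );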
 