// src/automated_registration.cpp
#include <Rcpp.h>

// OpenCV
#include <opencv2/opencv.hpp>
#include "opencv2/xfeatures2d.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/shape/shape_transformer.hpp"
// #include <opencv2/imgproc.hpp>

// Internal functions
#include "auxiliary.h"
#include "image.h"

// Namespaces
using namespace Rcpp;
using namespace std;
using namespace cv;
using namespace cv::xfeatures2d;

// check if keypoints are degenerate
std::string check_degenerate(std::vector<cv::KeyPoint> keypoints1, std::vector<cv::KeyPoint> keypoints2) {

  // get the standard deviation of each keypoint set
  double keypoints1_sd = cppSD(keypoints1);
  double keypoints2_sd = cppSD(keypoints2);

  // get warning message: near-zero spread means the keypoints are nearly
  // coincident, so the homography fit is ill-conditioned
  std::string message;
  if(keypoints1_sd < 1.0 || keypoints2_sd < 1.0){
    message = "degenerate";
    Rcout << "WARNING: points may be in a degenerate configuration." << endl;
  } else {
    message = "not degenerate";
  }

  return message;
}

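// Note: cppSD comes from auxiliary.h and also accepts std::vector<cv::KeyPoint>.
// A minimal sketch of the spread statistic it is assumed to compute (the actual
// implementation may differ):
//
//   double cppSD(const std::vector<cv::Point2f> &pts) {
//     cv::Scalar mean, sd;
//     cv::meanStdDev(pts, mean, sd);                 // per-axis mean and sd
//     return std::sqrt(sd[0]*sd[0] + sd[1]*sd[1]);   // combined spread in pixels
//   }
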
// // check transformation of points
// std::string check_transformation_by_pts_mean_sqrt(std::vector<cv::KeyPoint> keypoints1, std::vector<cv::KeyPoint> keypoints2, Mat &h, Mat &mask){
//
//   // perspective transformation of query keypoints
//   std::vector<cv::Point2f> keypoints1_reg;
//   cv::perspectiveTransform(keypoints1, keypoints1_reg, h);
//
// }

// check distribution of registered points
std::string check_transformation_by_point_distribution(Mat &im, Mat &h){

  // get image shape and grid spacing (guard against a zero-width interval
  // for images smaller than 50 pixels in either dimension)
  int height = im.rows;
  int width = im.cols;
  double height_interval = std::max(1.0, height / 50.0);
  double width_interval = std::max(1.0, width / 50.0);

  // build a regular grid of points over the image
  std::vector<cv::Point2f> gridpoints;
  for (double i = 0.0; i <= height; i += height_interval) {
    for (double j = 0.0; j <= width; j += width_interval) {
      gridpoints.push_back(cv::Point2f(j,i));
    }
  }

  // register grid points
  std::vector<cv::Point2f> gridpoints_reg;
  cv::perspectiveTransform(gridpoints, gridpoints_reg, h);

  // Compute the standard deviation of the transformed points
  double gridpoints_reg_sd = cppSD(gridpoints_reg);

  // get warning message: the transformed grid should neither collapse to a
  // point (sd < 1) nor spread beyond the image extent (sd > max dimension)
  std::string message;
  if(gridpoints_reg_sd < 1.0 || gridpoints_reg_sd > max(height, width)){
    message = "poor distribution";
    Rcout << "WARNING: Transformation may be poor - the transformed point grid is overly concentrated or dispersed!" << endl;
  } else {
    message = "good distribution";
  }

  return message;
}

// do overall checks on keypoints and images
std::string check_transformation_metrics(std::vector<cv::KeyPoint> keypoints1, std::vector<cv::KeyPoint> keypoints2, Mat &im1, Mat &im2, Mat &h, Mat &mask) {

  // check keypoint standard deviation
  std::string degenerate = check_degenerate(keypoints1, keypoints2);

  // check transformation
  // std::string transformation;
  // transformation = check_transformation_by_pts_mean_sqrt(keypoints1, keypoints2, h, mask);

  // check distribution of a transformed grid (currently informational only:
  // its warning is printed, but the result does not affect the return value)
  std::string distribution = check_transformation_by_point_distribution(im2, h);

  return degenerate;
}

// get good matching keypoints via Lowe's ratio test
void getGoodMatches(const std::vector<std::vector<DMatch>> &matches, std::vector<DMatch> &good_matches, const float lowe_ratio = 0.8)
{
  for (size_t i = 0; i < matches.size(); i++) {
    // knnMatch may return fewer than two neighbors for some descriptors
    if (matches[i].size() < 2) continue;
    if (matches[i][0].distance < lowe_ratio * matches[i][1].distance) {
      good_matches.push_back(matches[i][0]);
    }
  }
}

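// Lowe's ratio test in numbers: with best/second-best descriptor distances of
// 100 and 150, the match is kept (100 < 0.8 * 150 = 120); with 100 and 110 it
// is rejected (100 >= 0.8 * 110 = 88), since a close second neighbor suggests
// the correspondence is ambiguous.
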
// remove duplicate keypoints for TPS
void removeCloseMatches(std::vector<cv::Point2f>& points1, std::vector<cv::Point2f>& points2, float threshold = std::numeric_limits<float>::epsilon()) {

  // Create vectors to store filtered points
  std::vector<cv::Point2f> filtered_points1;
  std::vector<cv::Point2f> filtered_points2;

  // Iterate through the points
  for (size_t i = 0; i < points1.size(); ++i) {
    bool is_close = false;

    // Compare the current point with all already filtered points
    for (size_t j = 0; j < filtered_points1.size(); ++j) {
      float dist_squared1 = (points1[i].x - filtered_points1[j].x) * (points1[i].x - filtered_points1[j].x) +
        (points1[i].y - filtered_points1[j].y) * (points1[i].y - filtered_points1[j].y);
      float dist_squared2 = (points2[i].x - filtered_points2[j].x) * (points2[i].x - filtered_points2[j].x) +
        (points2[i].y - filtered_points2[j].y) * (points2[i].y - filtered_points2[j].y);

      // If either squared distance falls below the threshold, the pair is a near-duplicate
      if (dist_squared1 < threshold || dist_squared2 < threshold) {
        is_close = true;
        break;
      }
    }

    // If no close point was found, add the current point to the filtered list
    if (!is_close) {
      filtered_points1.push_back(points1[i]);
      filtered_points2.push_back(points2[i]);
    }
  }

  // Update the original point sets with the filtered points
  points1 = filtered_points1;
  points2 = filtered_points2;
}

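// Why near-duplicates must go: estimating a thin-plate spline solves a linear
// system with one row per control point, and coincident points yield identical
// rows, making that system singular. Dropping them keeps
// estimateTransformation() below well-posed.
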
// align images with the BRUTE FORCE algorithm
void alignImagesBRUTE(Mat &im1, Mat &im2, Mat &im1Reg, Mat &im1Overlay, Mat &imMatches, Mat &h, const float GOOD_MATCH_PERCENT, const int MAX_FEATURES)
{
  // Convert images to grayscale
  Mat im1Gray, im2Gray;
  cvtColor(im1, im1Gray, cv::COLOR_BGR2GRAY);
  cvtColor(im2, im2Gray, cv::COLOR_BGR2GRAY);

  // Variables to store keypoints and descriptors
  std::vector<KeyPoint> keypoints1, keypoints2;
  Mat descriptors1, descriptors2;

  // Detect ORB features and compute descriptors
  Ptr<Feature2D> orb = ORB::create(MAX_FEATURES);
  orb->detectAndCompute(im1Gray, Mat(), keypoints1, descriptors1);
  orb->detectAndCompute(im2Gray, Mat(), keypoints2, descriptors2);
  Rcout << "DONE: ORB-based keypoint detection and descriptor computation" << endl;

  // Match features
  std::vector<DMatch> matches;
  Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
  matcher->match(descriptors1, descriptors2, matches, Mat());
  Rcout << "DONE: BruteForce-Hamming - descriptor matching" << endl;

  // Sort matches by score (DMatch orders by descriptor distance)
  std::sort(matches.begin(), matches.end());

  // Keep only the best GOOD_MATCH_PERCENT fraction of matches
  const int numGoodMatches = matches.size() * GOOD_MATCH_PERCENT;
  matches.erase(matches.begin() + numGoodMatches, matches.end());
  Rcout << "DONE: get good matches by distance thresholding" << endl;

  // Extract location of good matches
  std::vector<Point2f> points1, points2;
  for (size_t i = 0; i < matches.size(); i++)
  {
    points1.push_back(keypoints1[matches[i].queryIdx].pt);
    points2.push_back(keypoints2[matches[i].trainIdx].pt);
  }

  // Find homography
  h = findHomography(points1, points2, RANSAC);
  Rcout << "DONE: calculated homography matrix" << endl;

  // Extract location of good matches in terms of keypoints
  std::vector<KeyPoint> keypoints1_best, keypoints2_best;
  std::vector<cv::DMatch> goodMatches;
  for (size_t i = 0; i < matches.size(); i++)
  {
    keypoints1_best.push_back(keypoints1[matches[i].queryIdx]);
    keypoints2_best.push_back(keypoints2[matches[i].trainIdx]);
    goodMatches.push_back(cv::DMatch(static_cast<int>(i), static_cast<int>(i), 0));
  }

  // Draw the retained matches only
  drawMatches(im1, keypoints1_best, im2, keypoints2_best, goodMatches, imMatches);

  // Use homography to warp the query image into the reference frame
  warpPerspective(im1, im1Reg, h, im2.size());
  warpPerspective(im1, im1Overlay, h, im2.size());
}

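// For reference, the estimated h is a 3x3 projective transform; a query pixel
// (x, y) lands at
//   x' = (h11*x + h12*y + h13) / (h31*x + h32*y + h33)
//   y' = (h21*x + h22*y + h23) / (h31*x + h32*y + h33)
// which is the mapping warpPerspective applies when carrying im1 into im2's frame.
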
// align images with the FLANN algorithm
void alignImagesFLANN(Mat &im1, Mat &im2, Mat &im1Reg, Mat &im1Overlay, Mat &imMatches, Mat &h,
                      const bool invert_query, const bool invert_ref,
                      const char* flipflop_query, const char* flipflop_ref,
                      const char* rotate_query, const char* rotate_ref)
{
  // seed for reproducible RANSAC
  cv::setRNGSeed(0);

  // Convert images to grayscale
  Mat im1Gray, im2Gray;
  cvtColor(im1, im1Gray, cv::COLOR_BGR2GRAY);
  cvtColor(im2, im2Gray, cv::COLOR_BGR2GRAY);

  // Process images
  Mat im1Proc, im2Proc, im1NormalProc;
  im1Proc = preprocessImage(im1Gray, invert_query, flipflop_query, rotate_query);
  im1NormalProc = preprocessImage(im1, false, flipflop_query, rotate_query);
  im2Proc = preprocessImage(im2Gray, invert_ref, flipflop_ref, rotate_ref);

  // Variables to store keypoints and descriptors
  std::vector<KeyPoint> keypoints1, keypoints2;
  Mat descriptors1, descriptors2;

  // Detect SIFT features
  Ptr<Feature2D> sift = cv::SIFT::create();
  sift->detectAndCompute(im1Proc, Mat(), keypoints1, descriptors1);
  sift->detectAndCompute(im2Proc, Mat(), keypoints2, descriptors2);
  Rcout << "DONE: SIFT-based keypoint detection and descriptor computation" << endl;

  // Match features using FLANN matching; the custom matcher is used directly,
  // since FlannBasedMatcher::create() would return a default-configured matcher
  // and silently discard the index/search parameters
  std::vector<std::vector<DMatch>> matches;
  cv::Ptr<cv::FlannBasedMatcher> matcher = cv::makePtr<cv::FlannBasedMatcher>(
    cv::makePtr<cv::flann::KDTreeIndexParams>(5),
    cv::makePtr<cv::flann::SearchParams>(50, 0, true));
  matcher->knnMatch(descriptors1, descriptors2, matches, 2);
  Rcout << "DONE: FLANN - Fast Library for Approximate Nearest Neighbors - descriptor matching" << endl;

  // Find good matches
  std::vector<DMatch> good_matches;
  getGoodMatches(matches, good_matches);
  Rcout << "DONE: get good matches by distance thresholding" << endl;

  // Extract location of good matches
  std::vector<Point2f> points1, points2;
  for (size_t i = 0; i < good_matches.size(); i++)
  {
    points1.push_back(keypoints1[good_matches[i].queryIdx].pt);
    points2.push_back(keypoints2[good_matches[i].trainIdx].pt);
  }

  // Find homography
  cv::Mat mask;
  h = findHomography(points1, points2, RANSAC, 5, mask);
  Rcpp::Rcout << "DONE: calculated homography matrix" << endl;

  // Draw the good matches only
  std::vector<cv::DMatch> top_matches;
  std::vector<cv::KeyPoint> keypoints1_best, keypoints2_best;
  for (size_t i = 0; i < good_matches.size(); i++)
  {
    keypoints1_best.push_back(keypoints1[good_matches[i].queryIdx]);
    keypoints2_best.push_back(keypoints2[good_matches[i].trainIdx]);
    top_matches.push_back(cv::DMatch(static_cast<int>(i), static_cast<int>(i), 0));
  }
  drawMatches(im1Proc, keypoints1_best, im2Proc, keypoints2_best, top_matches, imMatches);

  // Visualize matches, for demonstration purposes
  // std::vector<cv::DMatch> top_matches_vis;
  // std::vector<cv::KeyPoint> keypoints1_best_vis, keypoints2_best_vis;
  // for(size_t i = 0; i < good_matches.size(); i++)
  // {
  //   keypoints1_best_vis.push_back(keypoints1[good_matches[i].queryIdx]);
  //   keypoints2_best_vis.push_back(keypoints2[good_matches[i].trainIdx]);
  //   top_matches_vis.push_back(cv::DMatch(static_cast<int>(i), static_cast<int>(i), 0));
  // }
  // Mat im1Proc_vis, im2Proc_vis, im1NormalProc_vis, imMatches_vis;
  // im1Proc_vis = preprocessImage(im1, invert_query, flipflop_query, rotate_query);
  // im2Proc_vis = preprocessImage(im2, invert_ref, flipflop_ref, rotate_ref);
  // drawMatches(im1Proc_vis, keypoints1_best, im2Proc_vis, keypoints2_best, top_matches, imMatches_vis);
  // cv::imwrite("matches.jpg", imMatches_vis);

  // check keypoints (prints warnings; the result is not otherwise used here)
  std::string is_faulty = check_transformation_metrics(keypoints1_best, keypoints2_best, im1, im2, h, mask);

  // Use homography to warp image
  Mat im1Warp, im1NormalWarp;
  warpPerspective(im1Proc, im1Warp, h, im2Proc.size());
  warpPerspective(im1NormalProc, im1NormalWarp, h, im2Proc.size());
  im1Reg = reversepreprocessImage(im1NormalWarp, flipflop_ref, rotate_ref);
  Rcout << "DONE: warped query image" << endl;

  // blend the warped query and the reference for the overlay
  Mat im1Combine;
  cv::addWeighted(im2Proc, 0.7, im1Warp, 0.3, 0, im1Combine);

  // return as rgb
  cvtColor(im1Combine, im1Overlay, cv::COLOR_GRAY2BGR);
  cvtColor(im2Proc, im2, cv::COLOR_GRAY2BGR);
}

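// FLANN configuration note: KDTreeIndexParams(5) indexes the SIFT descriptors
// with five randomized kd-trees, and SearchParams(50, 0, true) permits 50 leaf
// checks per query with results returned sorted; raising either value trades
// speed for nearest-neighbor accuracy.
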
// align images with the FLANN algorithm followed by non-rigid TPS refinement
void alignImagesFLANNTPS(Mat &im1, Mat &im2, Mat &im1Reg, Mat &im1Overlay,
                         Mat &imMatches, Mat &h, Rcpp::List &keypoints,
                         const bool invert_query, const bool invert_ref,
                         const char* flipflop_query, const char* flipflop_ref,
                         const char* rotate_query, const char* rotate_ref)
{
  // seed for reproducible RANSAC
  cv::setRNGSeed(0);

  // Convert images to grayscale
  Mat im1Gray, im2Gray;
  cvtColor(im1, im1Gray, cv::COLOR_BGR2GRAY);
  cvtColor(im2, im2Gray, cv::COLOR_BGR2GRAY);

  // Process images
  Mat im1Proc, im2Proc, im1NormalProc;
  im1Proc = preprocessImage(im1Gray, invert_query, flipflop_query, rotate_query);
  im1NormalProc = preprocessImage(im1, false, flipflop_query, rotate_query);
  im2Proc = preprocessImage(im2Gray, invert_ref, flipflop_ref, rotate_ref);

  // Variables to store keypoints and descriptors
  std::vector<KeyPoint> keypoints1, keypoints2;
  Mat descriptors1, descriptors2;

  // Detect SIFT features
  Ptr<Feature2D> sift = cv::SIFT::create();
  sift->detectAndCompute(im1Proc, Mat(), keypoints1, descriptors1);
  sift->detectAndCompute(im2Proc, Mat(), keypoints2, descriptors2);
  Rcout << "DONE: SIFT-based keypoint detection and descriptor computation" << endl;

  // Match features using FLANN matching (see the configuration note above)
  std::vector<std::vector<DMatch>> matches;
  cv::Ptr<cv::FlannBasedMatcher> matcher = cv::makePtr<cv::FlannBasedMatcher>(
    cv::makePtr<cv::flann::KDTreeIndexParams>(5),
    cv::makePtr<cv::flann::SearchParams>(50, 0, true));
  matcher->knnMatch(descriptors1, descriptors2, matches, 2);
  Rcout << "DONE: FLANN - Fast Library for Approximate Nearest Neighbors - descriptor matching" << endl;

  // Find good matches
  std::vector<DMatch> good_matches;
  getGoodMatches(matches, good_matches);
  Rcout << "DONE: get good matches by distance thresholding" << endl;

  // Extract location of good matches
  std::vector<Point2f> points1, points2;
  for (size_t i = 0; i < good_matches.size(); i++)
  {
    points1.push_back(keypoints1[good_matches[i].queryIdx].pt);
    points2.push_back(keypoints2[good_matches[i].trainIdx].pt);
  }

  // Find homography
  cv::Mat mask;
  h = findHomography(points1, points2, RANSAC, 5, mask);
  Rcout << "DONE: calculated homography matrix" << endl;

  // Use homography to warp image
  Mat im1Warp, im1NormalWarp;
  warpPerspective(im1Proc, im1Warp, h, im2Proc.size());
  warpPerspective(im1NormalProc, im1NormalWarp, h, im2Proc.size());
  Rcout << "DONE: warped query image" << endl;

  // Draw the good matches only
  std::vector<cv::DMatch> top_matches;
  std::vector<cv::KeyPoint> keypoints1_best, keypoints2_best;
  for (size_t i = 0; i < good_matches.size(); i++)
  {
    keypoints1_best.push_back(keypoints1[good_matches[i].queryIdx]);
    keypoints2_best.push_back(keypoints2[good_matches[i].trainIdx]);
    top_matches.push_back(cv::DMatch(static_cast<int>(i), static_cast<int>(i), 0));
  }
  drawMatches(im1Proc, keypoints1_best, im2Proc, keypoints2_best, top_matches, imMatches);

  // check keypoints
  std::string is_faulty = check_transformation_metrics(keypoints1_best, keypoints2_best, im1, im2, h, mask);

  // if keypoints are degenerate, stop after the homography (FLANN only);
  // otherwise refine with TPS
  if (is_faulty == "degenerate") {

    // blend the warped query and the reference for the overlay
    Mat im1Combine;
    cv::addWeighted(im2Proc, 0.7, im1Warp, 0.3, 0, im1Combine);

    // Reverse process
    im1Reg = reversepreprocessImage(im1NormalWarp, flipflop_ref, rotate_ref);

    // return as rgb
    cvtColor(im1Combine, im1Overlay, cv::COLOR_GRAY2BGR);
    cvtColor(im2Proc, im2, cv::COLOR_GRAY2BGR);

  // if FLANN succeeds
  } else {

    // Keep only the inliers flagged by the RANSAC mask
    std::vector<cv::Point2f> filtered_points1;
    std::vector<cv::Point2f> filtered_points2;
    for (int i = 0; i < mask.rows; i++) {
      if (mask.at<uchar>(i)) {
        filtered_points1.push_back(points1[i]);
        filtered_points2.push_back(points2[i]);
      }
    }
    removeCloseMatches(filtered_points1, filtered_points2);

    // transform query points with the homography
    std::vector<cv::Point2f> filtered_points1_reg;
    cv::perspectiveTransform(filtered_points1, filtered_points1_reg, h);

    // get TPS matches (one-to-one correspondences)
    std::vector<cv::DMatch> tps_matches;
    for (size_t i = 0; i < filtered_points2.size(); i++)
      tps_matches.push_back(cv::DMatch(static_cast<int>(i), static_cast<int>(i), 0));

    // calculate TPS transformation (regularization 0 gives exact interpolation)
    Ptr<ThinPlateSplineShapeTransformer> tps = cv::createThinPlateSplineShapeTransformer(0);
    tps->estimateTransformation(filtered_points2, filtered_points1_reg, tps_matches);

    // save keypoints
    keypoints[0] = point2fToNumericMatrix(filtered_points2);
    keypoints[1] = point2fToNumericMatrix(filtered_points1_reg);

    // determine extension limits for both images
    int y_max = max(im1Warp.rows, im2.rows);
    int x_max = max(im1Warp.cols, im2.cols);

    // extend images
    cv::copyMakeBorder(im1Warp, im1Warp, 0, y_max - im1Warp.rows, 0, x_max - im1Warp.cols, cv::BORDER_CONSTANT, Scalar(0, 0, 0));
    cv::copyMakeBorder(im1NormalWarp, im1NormalWarp, 0, y_max - im1NormalWarp.rows, 0, x_max - im1NormalWarp.cols, cv::BORDER_CONSTANT, Scalar(0, 0, 0));

    // transform images with the TPS warp
    Mat im1Reg_Warp_nonrigid;
    Mat im1Reg_NormalWarp_nonrigid;
    tps->warpImage(im1Warp, im1Reg_Warp_nonrigid);
    tps->warpImage(im1NormalWarp, im1Reg_NormalWarp_nonrigid);

    // crop back to the reference image size
    im1Reg_NormalWarp_nonrigid = im1Reg_NormalWarp_nonrigid(cv::Range(0, im2Proc.size().height), cv::Range(0, im2Proc.size().width)).clone();
    im1Reg_Warp_nonrigid = im1Reg_Warp_nonrigid(cv::Range(0, im2Proc.size().height), cv::Range(0, im2Proc.size().width)).clone();

    // blend the warped query and the reference for the overlay
    Mat im1Combine;
    cv::addWeighted(im2Proc, 0.7, im1Reg_Warp_nonrigid, 0.3, 0, im1Combine);

    // Reverse process
    im1Reg = reversepreprocessImage(im1Reg_NormalWarp_nonrigid, flipflop_ref, rotate_ref);

    // return as rgb
    cvtColor(im1Combine, im1Overlay, cv::COLOR_GRAY2BGR);
    cvtColor(im2Proc, im2, cv::COLOR_GRAY2BGR);
  }
}

// [[Rcpp::export]]
Rcpp::List automated_registeration_rawvector(Rcpp::RawVector ref_image, Rcpp::RawVector query_image,
                                             const int width1, const int height1,
                                             const int width2, const int height2,
                                             const float GOOD_MATCH_PERCENT, const int MAX_FEATURES,
                                             const bool invert_query, const bool invert_ref,
                                             Rcpp::String flipflop_query, Rcpp::String flipflop_ref,
                                             Rcpp::String rotate_query, Rcpp::String rotate_ref,
                                             Rcpp::String matcher, Rcpp::String method)
{
  // Return data
  Rcpp::List out(5);
  Rcpp::List out_trans(2);
  Rcpp::List keypoints(2);
  Mat imOverlay, imReg, h, imMatches;

  // Read reference image
  cv::Mat imReference = imageToMat(ref_image, width1, height1);

  // Read image to be aligned
  cv::Mat im = imageToMat(query_image, width2, height2);

  // Homography (with FLANN) + Non-rigid (TPS)
  if(strcmp(matcher.get_cstring(), "FLANN") == 0 && strcmp(method.get_cstring(), "Homography + Non-Rigid") == 0){
    alignImagesFLANNTPS(im, imReference, imReg, imOverlay, imMatches,
                        h, keypoints,
                        invert_query, invert_ref,
                        flipflop_query.get_cstring(), flipflop_ref.get_cstring(),
                        rotate_query.get_cstring(), rotate_ref.get_cstring());
  }

  // Homography (with FLANN)
  if(strcmp(matcher.get_cstring(), "FLANN") == 0 && strcmp(method.get_cstring(), "Homography") == 0){
    alignImagesFLANN(im, imReference, imReg, imOverlay, imMatches,
                     h, invert_query, invert_ref,
                     flipflop_query.get_cstring(), flipflop_ref.get_cstring(),
                     rotate_query.get_cstring(), rotate_ref.get_cstring());
  }

  // Homography (with Brute-Force matching)
  if(strcmp(matcher.get_cstring(), "BRUTE-FORCE") == 0 && strcmp(method.get_cstring(), "Homography") == 0){
    alignImagesBRUTE(im, imReference, imReg, imOverlay, imMatches,
                     h, GOOD_MATCH_PERCENT, MAX_FEATURES);
  }

  // transformation: a matrix, a set of keypoints, or both
  out_trans[0] = matToNumericMatrix(h.clone());
  out_trans[1] = keypoints;
  out[0] = out_trans;

  // reference (destination) image
  out[1] = matToImage(imReference.clone());

  // registered image
  out[2] = matToImage(imReg.clone());

  // keypoint matching image
  out[3] = matToImage(imMatches.clone());

  // overlay image
  out[4] = matToImage(imOverlay.clone());

  // return
  return out;
}
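
// Example call from R once the package is compiled (illustrative sketch; the
// preprocessing argument values below are assumptions, not documented options):
//
//   res <- automated_registeration_rawvector(ref_image, query_image,
//                                            width1, height1, width2, height2,
//                                            GOOD_MATCH_PERCENT = 0.2, MAX_FEATURES = 500,
//                                            invert_query = FALSE, invert_ref = FALSE,
//                                            flipflop_query = "None", flipflop_ref = "None",
//                                            rotate_query = "0", rotate_ref = "0",
//                                            matcher = "FLANN", method = "Homography + Non-Rigid")
//
// res[[1]] holds the transformation (homography matrix and TPS keypoints),
// res[[2]] the reference image, res[[3]] the registered query image,
// res[[4]] the keypoint-match visualization, and res[[5]] the overlay.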