Visual Servoing Platform version 3.6.0
vpKeyPoint.cpp
/*
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Key point functionalities.
 */

#include <iomanip>
#include <limits>

#include <visp3/core/vpIoTools.h>
#include <visp3/vision/vpKeyPoint.h>

#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)

#include <pugixml.hpp>

namespace
{
// Specific type transformation functions
inline cv::DMatch knnToDMatch(const std::vector<cv::DMatch> &knnMatches)
{
  if (knnMatches.size() > 0) {
    return knnMatches[0];
  }

  return cv::DMatch();
}

inline vpImagePoint matchRansacToVpImage(const std::pair<cv::KeyPoint, cv::Point3f> &pair)
{
  return vpImagePoint(pair.first.pt.y, pair.first.pt.x);
}

} // namespace

vpKeyPoint::vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
    m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
  m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);

  init();
}

vpKeyPoint::vpKeyPoint(const std::string &detectorName, const std::string &extractorName,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
    m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(detectorName);
  m_extractorNames.push_back(extractorName);

  init();
}

vpKeyPoint::vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
    m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
    m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
    m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
    m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
    m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
    m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
    m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
    m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
    m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();
  init();
}

void vpKeyPoint::affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
{
  int h = img.rows;
  int w = img.cols;

  mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

  cv::Mat A = cv::Mat::eye(2, 3, CV_32F);

  // if (phi != 0.0) {
  if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    phi *= M_PI / 180.;
    double s = sin(phi);
    double c = cos(phi);

    A = (cv::Mat_<float>(2, 2) << c, -s, s, c);

    cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
    cv::Mat tcorners = corners * A.t();
    cv::Mat tcorners_x, tcorners_y;
    tcorners.col(0).copyTo(tcorners_x);
    tcorners.col(1).copyTo(tcorners_y);
    std::vector<cv::Mat> channels;
    channels.push_back(tcorners_x);
    channels.push_back(tcorners_y);
    cv::merge(channels, tcorners);

    cv::Rect rect = cv::boundingRect(tcorners);
    A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);

    cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
  }
  // if (tilt != 1.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
    double s = 0.8 * sqrt(tilt * tilt - 1);
    cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
    cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
    A.row(0) = A.row(0) / tilt;
  }
  // if (tilt != 1.0 || phi != 0.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
      std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    h = img.rows;
    w = img.cols;
    cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
  }
  cv::invertAffineTransform(A, Ai);
}
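
// Note (illustrative sketch, not part of the original file): affineSkew() is the
// building block of the ASIFT-style detection enabled with setUseAffineDetection(true).
// detectExtractAffine() drives it over a set of simulated tilts and in-plane rotations,
// conceptually like the sampling below (values follow the ASIFT paper; names are ours):
//
//   for (int tl = 1; tl < 6; tl++) {
//     double t = pow(2, 0.5 * tl);                          // tilt: sqrt(2), 2, 2*sqrt(2), ...
//     for (double phi = 0.; phi < 180.; phi += 72.0 / t) {  // finer rotation step at high tilt
//       cv::Mat timg = img.clone(), mask, Ai;
//       affineSkew(t, phi, timg, mask, Ai);                 // warp image, get mask + inverse map
//       // detect/extract on timg, then map the keypoints back through Ai
//     }
//   }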

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I) { return buildReference(I, vpRect()); }

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color) { return buildReference(I_color, vpRect()); }

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
                                        unsigned int width)
{
  return buildReference(I, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
                                        unsigned int width)
{
  return buildReference(I_color, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle)
{
  // Reset variables used when dealing with 3D models
  // Since no 3D point list is passed, we don't need these variables
  m_trainPoints.clear();
  m_mapOfImageId.clear();
  m_mapOfImages.clear();
  m_currentImageId = 1;

  if (m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
    std::vector<cv::Mat> listOfTrainDescriptors;

    // Detect keypoints and extract descriptors on multiple images
    detectExtractAffine(I, listOfTrainKeyPoints, listOfTrainDescriptors);

    // Flatten the different train lists
    m_trainKeyPoints.clear();
    for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
         it != listOfTrainKeyPoints.end(); ++it) {
      m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
    }

    bool first = true;
    for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
         ++it) {
      if (first) {
        first = false;
        it->copyTo(m_trainDescriptors);
      } else {
        m_trainDescriptors.push_back(*it);
      }
    }
  } else {
    detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
    extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
  }

  // Save the correspondence keypoint class_id with the training image_id in a map.
  // Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, m_referenceImagePointsList);

  _reference_computed = true;

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}
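
// Example usage (a minimal sketch, not from this file): building the reference
// database from one grayscale image and matching another view against it. Image
// loading is assumed to be done elsewhere.
//
//   vpImage<unsigned char> Iref, Icur;
//   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
//   unsigned int nbRef = keypoint.buildReference(Iref); // detect + extract + train matcher
//   unsigned int nbMatches = keypoint.matchPoint(Icur); // match the current image against it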

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, rectangle));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I,
                                        const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  if (!append) {
    m_trainPoints.clear();
    m_mapOfImageId.clear();
    m_mapOfImages.clear();
    m_currentImageId = 0;
    m_trainKeyPoints.clear();
  }

  m_currentImageId++;

  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
  // Set class_id if != -1
  if (class_id != -1) {
    for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
      it->class_id = class_id;
    }
  }

  // Save the correspondence keypoint class_id with the training image_id in a map.
  // Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
       ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Append reference lists
  m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
  if (!append) {
    trainDescriptors.copyTo(m_trainDescriptors);
  } else {
    m_trainDescriptors.push_back(trainDescriptors);
  }
  this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, m_referenceImagePointsList);
  vpConvert::convertFromOpenCV(m_trainPoints, m_trainVpPoints);

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  _reference_computed = true;

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

void vpKeyPoint::compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate.pt.x, candidate.pt.y, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point = cv::Point3f((float)point_obj[0], (float)point_obj[1], (float)point_obj[2]);
}
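
// Derivation note (added for clarity): a pixel back-projects to the camera-frame ray
// (X, Y, Z) = (x * Z, y * Z, Z), with (x, y) the normalized coordinates returned by
// vpPixelMeterConversion::convertPoint(). Substituting into the plane equation
// A*X + B*Y + C*Z + D = 0, written in the camera frame, gives
//   Z * (A*x + B*y + C) + D = 0  =>  Z = -D / (A*x + B*y + C),
// which is the expression used above before moving the point back to the object
// frame with cMo.inverse().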

void vpKeyPoint::compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point.setWorldCoordinates(point_obj);
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<cv::KeyPoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<cv::Point3f> &points, cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpImagePoint imPt;
  cv::Point3f pt;
  cv::Mat desc;

  std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      imPt.set_ij(it2->first.pt.y, it2->first.pt.x);
      if (it1->isInside(imPt)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != NULL) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      } else {
        ++it2;
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<vpImagePoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<vpPoint> &points, cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpPoint pt;
  cv::Mat desc;

  std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      if (it1->isInside(it2->first)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != NULL) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      } else {
        ++it2;
      }
    }
  }
}

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<cv::KeyPoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
    cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current
      // vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->pt.y, it1->pt.x)) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on
          // cylinders
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->pt.x, it1->pt.y, xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(cv::Point3f((float)pt.get_oX(), (float)pt.get_oY(), (float)pt.get_oZ()));

            if (descriptors != NULL) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<vpImagePoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
    cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current
      // vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->get_i(), it1->get_j())) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on
          // cylinders
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->get_u(), it1->get_v(), xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(pt);

            if (descriptors != NULL) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

bool vpKeyPoint::computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                             const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                             double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    std::cerr << "Not enough points to compute the pose (at least 4 points "
                 "are needed)."
              << std::endl;

    return false;
  }

  cv::Mat cameraMatrix =
      (cv::Mat_<double>(3, 3) << cam.get_px(), 0, cam.get_u0(), 0, cam.get_py(), cam.get_v0(), 0, 0, 1);
  cv::Mat rvec, tvec;

  // Bug with OpenCV < 2.4.0 when zero distortion is provided by an empty
  // array: http://code.opencv.org/issues/1700 and
  // http://code.opencv.org/issues/1718 ("what(): Distortion coefficients must
  // be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1 floating-point vector in function
  // cvProjectPoints2"). Fixed in OpenCV 2.4.0 (r7558).
  // cv::Mat distCoeffs;
  cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);

  try {
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    // OpenCV 3.0.0 (2014/12/12)
    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError,
                       0.99, // confidence=0.99 (default): the probability
                             // that the algorithm produces a useful result.
                       inlierIndex, cv::SOLVEPNP_ITERATIVE);
    // Available PnP methods:
    // - SOLVEPNP_ITERATIVE (default): iterative method based on Levenberg-Marquardt
    //   optimization. The function finds a pose that minimizes the reprojection
    //   error, that is the sum of squared distances between the observed projections
    //   imagePoints and the objectPoints projected with projectPoints().
    // - SOLVEPNP_P3P: based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang,
    //   "Complete Solution Classification for the Perspective-Three-Point Problem".
    //   Requires exactly four object and image points.
    // - SOLVEPNP_EPNP: introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the
    //   paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation".
    // - SOLVEPNP_DLS: based on the paper of Joel A. Hesch and Stergios I. Roumeliotis,
    //   "A Direct Least-Squares (DLS) Method for PnP".
    // - SOLVEPNP_UPNP: based on the paper of A. Penate-Sanchez, J. Andrade-Cetto and
    //   F. Moreno-Noguer, "Exhaustive Linearization for Robust Camera Pose and Focal
    //   Length Estimation". Also estimates the parameters f_x and f_y, assuming both
    //   have the same value; cameraMatrix is then updated with the estimated focal
    //   length.
#else
    int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
    if (m_useConsensusPercentage) {
      nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
    }

    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
#endif
  } catch (cv::Exception &e) {
    std::cerr << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }
  vpTranslationVector translationVec(tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2));
  cMo = vpHomogeneousMatrix(translationVec, thetaUVector);

  if (func != NULL) {
    // Check the final pose returned by solvePnPRansac to discard
    // solutions which do not respect the pose criterion.
    if (!func(cMo)) {
      elapsedTime = (vpTime::measureTimeMs() - t);
      return false;
    }
  }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return true;
}
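
// Example usage (a hedged sketch, not from this file): estimating a pose from matched
// 2D/3D correspondences with this OpenCV-based RANSAC path. The point lists and the
// camera parameters px, py, u0, v0 are assumed to be available.
//
//   std::vector<cv::Point2f> imagePoints;   // matched 2D points, in pixels
//   std::vector<cv::Point3f> objectPoints;  // corresponding 3D model points, in meters
//   vpCameraParameters cam(px, py, u0, v0);
//   vpHomogeneousMatrix cMo;
//   std::vector<int> inlierIndex;
//   double elapsedTime;
//   vpKeyPoint keypoint;
//   if (keypoint.computePose(imagePoints, objectPoints, cam, cMo, inlierIndex, elapsedTime)) {
//     std::cout << "cMo:\n" << cMo << std::endl;
//   }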

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  std::vector<unsigned int> inlierIndex;
  return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
}

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (objectVpPoints.size() < 4) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    // std::cerr << "Not enough points to compute the pose (at least 4
    // points are needed)." << std::endl;

    return false;
  }

  vpPose pose;

  for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
    pose.addPoint(*it);
  }

  unsigned int nbInlierToReachConsensus = (unsigned int)m_nbRansacMinInlierCount;
  if (m_useConsensusPercentage) {
    nbInlierToReachConsensus =
        (unsigned int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
  }

  pose.setRansacFilterFlag(m_ransacFilterFlag);
  pose.setUseParallelRansac(m_ransacParallel);
  pose.setNbParallelRansacThreads(m_ransacParallelNbThreads);
  pose.setRansacNbInliersToReachConsensus(nbInlierToReachConsensus);
  pose.setRansacThreshold(m_ransacThreshold);
  pose.setRansacMaxTrials(m_nbRansacIterations);

  bool isRansacPoseEstimationOk = false;
  try {
    pose.setCovarianceComputation(m_computeCovariance);
    isRansacPoseEstimationOk = pose.computePose(vpPose::RANSAC, cMo, func);
    inliers = pose.getRansacInliers();
    inlierIndex = pose.getRansacInlierIndex();

    if (m_computeCovariance) {
      m_covarianceMatrix = pose.getCovarianceMatrix();
    }
  } catch (const vpException &e) {
    std::cerr << "e=" << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }

  // if (func != NULL && isRansacPoseEstimationOk) {
  //   // Check the final pose returned by the RANSAC VVS pose estimation, as in
  //   // some rare cases we can converge toward a final cMo that does not respect
  //   // the pose criterion even if the 4 minimal points picked do respect it.
  //   if (!func(&cMo)) {
  //     elapsedTime = (vpTime::measureTimeMs() - t);
  //     return false;
  //   }
  // }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return isRansacPoseEstimationOk;
}
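
// Tuning sketch (our example; the setter names are vpKeyPoint's, used under the
// assumption they keep their usual signatures): the RANSAC parameters forwarded to
// vpPose above are configured through the vpKeyPoint setters, e.g.:
//
//   vpKeyPoint keypoint;
//   keypoint.setRansacIteration(500);             // maximum number of RANSAC trials
//   keypoint.setRansacThreshold(0.005);           // inlier residual threshold
//   keypoint.setRansacMinInlierCount(50);         // consensus size to reach
//   keypoint.setUseRansacConsensusPercentage(true);
//   keypoint.setRansacConsensusPercentage(30.0);  // ... expressed as a % of the matches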

double vpKeyPoint::computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                              const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est)
{
  if (matchKeyPoints.size() == 0) {
    // return std::numeric_limits<double>::max(); // creates an error under
    // Windows. To fix it we would have to add #undef max
    return DBL_MAX;
  }

  std::vector<double> errors(matchKeyPoints.size());
  size_t cpt = 0;
  vpPoint pt;
  for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
       it != matchKeyPoints.end(); ++it, cpt++) {
    pt.set_oX(it->second.x);
    pt.set_oY(it->second.y);
    pt.set_oZ(it->second.z);
    pt.project(cMo_est);
    double u = 0.0, v = 0.0;
    vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), u, v);
    errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
  }

  return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
}

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
                                     vpImage<unsigned char> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<unsigned char>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &ICurrent,
                                     vpImage<vpRGBa> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<vpRGBa>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
{
  // Nb images in the training database + the current image we want to detect
  // the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded !" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    //(unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deal with a non-square mosaic grid given the total number of images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<unsigned char>(maxH * nbHeight, maxW * nbWidth);
  }
}
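
// Worked example of the grid sizing above (added note): with 5 training images,
// nbImg = 6 and nbImgSqrt = round(sqrt(6)) = 2; since 2 * 2 < 6 the grid grows to
// 3 columns x 2 rows, i.e. 6 tiles, each sized to the largest image in the database.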

void vpKeyPoint::createImageMatching(vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
{
  // Nb images in the training database + the current image we want to detect
  // the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded !" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    //(unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deal with a non-square mosaic grid given the total number of images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<vpRGBa>(maxH * nbHeight, maxW * nbWidth);
  }
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I_color, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask)
{
  double elapsedTime;
  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg, false);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  } else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I_color, matImg);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  } else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const cv::Mat &mask)
{
  double t = vpTime::measureTimeMs();
  keyPoints.clear();

  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
       it != m_detectors.end(); ++it) {
    std::vector<cv::KeyPoint> kp;

    it->second->detect(matImg, kp, mask);
    keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
  }

  elapsedTime = vpTime::measureTimeMs() - t;
}
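
// Example usage (a minimal sketch, not from this file): restricting detection to a
// region of interest. The rectangle is turned into the binary mask handed to the
// OpenCV detectors above.
//
//   vpImage<unsigned char> I;                    // assumed loaded beforehand
//   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
//   std::vector<cv::KeyPoint> keyPoints;
//   vpRect roi(vpImagePoint(50, 50), 200, 100);  // top-left (i=50, j=50), 200x100 pixels
//   keypoint.detect(I, keyPoints, roi);          // keypoints outside roi are masked out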

void vpKeyPoint::display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<unsigned char> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                                 unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                                 unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images so return
    std::cerr << "There is no training image loaded !" << std::endl;
    return;
  }

  // Nb images in the training database + the current image we want to detect
  // the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so we display the matching result side-by-side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  } else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Indexes of the current image in the grid, computed to put the
    // image preferably in the center of the mosaic grid
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      } else {
        // Shift by one unit the index of the training images that come after
        // the current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      } else {
        // Shift by one unit the index of the training images that come after
        // the current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw line for matching keypoints detected in the current image and
      // those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}

void vpKeyPoint::displayMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images so return
    std::cerr << "There is no training image loaded !" << std::endl;
    return;
  }

  // Nb images in the training database + the current image we want to detect
  // the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so we display the matching result side-by-side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  } else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Indexes of the current image in the grid, computed to put the
    // image preferably in the center of the mosaic grid
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      } else {
        // Shift by one unit the index of the training images that come after
        // the current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      } else {
        // Shift by one unit the index of the training images that come after
        // the current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw line for matching keypoints detected in the current image and
      // those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}

void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
                         std::vector<cv::Point3f> *trainPoints)
{
  double elapsedTime;
  extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
}

void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
                         std::vector<cv::Point3f> *trainPoints)
{
  double elapsedTime;
  extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
}

void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
                         std::vector<cv::Point3f> *trainPoints)
{
  double elapsedTime;
  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
}

void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
                         double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
{
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg, false);
  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
}

void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
                         double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
{
  cv::Mat matImg;
  vpImageConvert::convert(I_color, matImg);
  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
}
1896
1908void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1909 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1910{
1911 double t = vpTime::measureTimeMs();
1912 bool first = true;
1913
1914 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1915 itd != m_extractors.end(); ++itd) {
1916 if (first) {
1917 first = false;
1918 // Check if we have 3D object points information
1919 if (trainPoints != NULL && !trainPoints->empty()) {
1920 // Copy the input list of keypoints; keypoints for which a descriptor
1921 // cannot be computed are removed by the compute function
1922 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1923
1924 // Extract descriptors for the given list of keypoints
1925 itd->second->compute(matImg, keyPoints, descriptors);
1926
1927 if (keyPoints.size() != keyPoints_tmp.size()) {
1928 // Keypoints have been removed
1929 // Store the hash of a keypoint as the key and the index of the
1930 // keypoint as the value
1931 std::map<size_t, size_t> mapOfKeypointHashes;
1932 size_t cpt = 0;
1933 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1934 ++it, cpt++) {
1935 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1936 }
1937
1938 std::vector<cv::Point3f> trainPoints_tmp;
1939 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1940 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1941 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1942 }
1943 }
1944
1945 // Copy trainPoints_tmp back to the trainPoints output parameter
1946 *trainPoints = trainPoints_tmp;
1947 }
1948 } else {
1949 // Extract descriptors for the given list of keypoints
1950 itd->second->compute(matImg, keyPoints, descriptors);
1951 }
1952 } else {
1953 // Copy the input list of keypoints; keypoints for which a descriptor
1954 // cannot be computed are removed by the compute function
1955 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1956
1957 cv::Mat desc;
1958 // Extract descriptors for the given list of keypoints
1959 itd->second->compute(matImg, keyPoints, desc);
1960
1961 if (keyPoints.size() != keyPoints_tmp.size()) {
1962 // Keypoints have been removed
1963 // Store the hash of a keypoint as the key and the index of the
1964 // keypoint as the value
1965 std::map<size_t, size_t> mapOfKeypointHashes;
1966 size_t cpt = 0;
1967 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1968 ++it, cpt++) {
1969 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1970 }
1971
1972 std::vector<cv::Point3f> trainPoints_tmp;
1973 cv::Mat descriptors_tmp;
1974 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1975 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1976 if (trainPoints != NULL && !trainPoints->empty()) {
1977 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1978 }
1979
1980 if (!descriptors.empty()) {
1981 descriptors_tmp.push_back(descriptors.row((int)mapOfKeypointHashes[myKeypointHash(*it)]));
1982 }
1983 }
1984 }
1985
1986 if (trainPoints != NULL) {
1987 // Copy trainPoints_tmp back to the trainPoints output parameter
1988 *trainPoints = trainPoints_tmp;
1989 }
1990 // Copy descriptors_tmp to descriptors
1991 descriptors_tmp.copyTo(descriptors);
1992 }
1993
1994 // Merge descriptors horizontally
1995 if (descriptors.empty()) {
1996 desc.copyTo(descriptors);
1997 } else {
1998 cv::hconcat(descriptors, desc, descriptors);
1999 }
2000 }
2001 }
2002
2003 if (keyPoints.size() != (size_t)descriptors.rows) {
2004 std::cerr << "keyPoints.size() != (size_t) descriptors.rows" << std::endl;
2005 }
2006 elapsedTime = vpTime::measureTimeMs() - t;
2007}
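// Illustrative sketch: detecting keypoints and then extracting their
// descriptors with the overloads above; the detector/extractor choice is an
// assumption. Keypoints whose descriptor cannot be computed are dropped, so
// one descriptor row remains per surviving keypoint.
//
// vpKeyPoint keypoint("FAST", "ORB", "BruteForce-Hamming");
// std::vector<cv::KeyPoint> kpts;
// cv::Mat desc;
// double dtime = 0., etime = 0.;
// keypoint.detect(I, kpts, dtime);        // I is a vpImage<unsigned char>
// keypoint.extract(I, kpts, desc, etime); // may remove some keypoints
// assert(kpts.size() == (size_t)desc.rows);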
2008
2012void vpKeyPoint::filterMatches()
2013{
2014 std::vector<cv::KeyPoint> queryKpts;
2015 std::vector<cv::Point3f> trainPts;
2016 std::vector<cv::DMatch> m;
2017
2018 if (m_useKnn) {
2019 // double max_dist = 0;
2020 // double min_dist = std::numeric_limits<double>::max(); // creates an
2021 // error under Windows; to fix it we would have to add #undef max
2022 double min_dist = DBL_MAX;
2023 double mean = 0.0;
2024 std::vector<double> distance_vec(m_knnMatches.size());
2025
2026 if (m_filterType == stdAndRatioDistanceThreshold) {
2027 for (size_t i = 0; i < m_knnMatches.size(); i++) {
2028 double dist = m_knnMatches[i][0].distance;
2029 mean += dist;
2030 distance_vec[i] = dist;
2031
2032 if (dist < min_dist) {
2033 min_dist = dist;
2034 }
2035 // if (dist > max_dist) {
2036 // max_dist = dist;
2037 //}
2038 }
2039 mean /= m_queryDescriptors.rows;
2040 }
2041
2042 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2043 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2044 double threshold = min_dist + stdev;
2045
2046 for (size_t i = 0; i < m_knnMatches.size(); i++) {
2047 if (m_knnMatches[i].size() >= 2) {
2048 // Calculate ratio of the descriptor distance between the two nearest
2049 // neighbors of the keypoint
2050 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
2051 // float ratio = std::sqrt((vecMatches[i][0].distance *
2052 // vecMatches[i][0].distance)
2053 // / (vecMatches[i][1].distance *
2054 // vecMatches[i][1].distance));
2055 double dist = m_knnMatches[i][0].distance;
2056
2057 if (ratio < m_matchingRatioThreshold || (m_filterType == stdAndRatioDistanceThreshold && dist < threshold)) {
2058 m.push_back(cv::DMatch((int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
2059
2060 if (!m_trainPoints.empty()) {
2061 trainPts.push_back(m_trainPoints[(size_t)m_knnMatches[i][0].trainIdx]);
2062 }
2063 queryKpts.push_back(m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx]);
2064 }
2065 }
2066 }
2067 } else {
2068 // double max_dist = 0;
2069 // creates an error under Windows; to fix it we would have to add #undef max
2070 // double min_dist = std::numeric_limits<double>::max();
2071 double min_dist = DBL_MAX;
2072 double mean = 0.0;
2073 std::vector<double> distance_vec(m_matches.size());
2074 for (size_t i = 0; i < m_matches.size(); i++) {
2075 double dist = m_matches[i].distance;
2076 mean += dist;
2077 distance_vec[i] = dist;
2078
2079 if (dist < min_dist) {
2080 min_dist = dist;
2081 }
2082 // if (dist > max_dist) {
2083 // max_dist = dist;
2084 // }
2085 }
2086 mean /= m_queryDescriptors.rows;
2087
2088 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2089 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2090
2091 // Define a threshold where we keep all keypoints whose descriptor
2092 // distance falls below a factor of the minimum descriptor distance (over
2093 // all the query keypoints) or below the minimum descriptor distance plus
2094 // the standard deviation (computed over all the query descriptor
2095 // distances)
2096 double threshold =
2097 m_filterType == constantFactorDistanceThreshold ? m_matchingFactorThreshold * min_dist : min_dist + stdev;
2098
2099 for (size_t i = 0; i < m_matches.size(); i++) {
2100 if (m_matches[i].distance <= threshold) {
2101 m.push_back(cv::DMatch((int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
2102
2103 if (!m_trainPoints.empty()) {
2104 trainPts.push_back(m_trainPoints[(size_t)m_matches[i].trainIdx]);
2105 }
2106 queryKpts.push_back(m_queryKeyPoints[(size_t)m_matches[i].queryIdx]);
2107 }
2108 }
2109 }
2110
2111 if (m_useSingleMatchFilter) {
2112 // Eliminate matches where multiple query keypoints are matched to the
2113 // same train keypoint
2114 std::vector<cv::DMatch> mTmp;
2115 std::vector<cv::Point3f> trainPtsTmp;
2116 std::vector<cv::KeyPoint> queryKptsTmp;
2117
2118 std::map<int, int> mapOfTrainIdx;
2119 // Count the number of query points matched to the same train point
2120 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2121 mapOfTrainIdx[it->trainIdx]++;
2122 }
2123
2124 // Keep matches with only one correspondence
2125 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2126 if (mapOfTrainIdx[it->trainIdx] == 1) {
2127 mTmp.push_back(cv::DMatch((int)queryKptsTmp.size(), it->trainIdx, it->distance));
2128
2129 if (!m_trainPoints.empty()) {
2130 trainPtsTmp.push_back(m_trainPoints[(size_t)it->trainIdx]);
2131 }
2132 queryKptsTmp.push_back(queryKpts[(size_t)it->queryIdx]);
2133 }
2134 }
2135
2136 m_filteredMatches = mTmp;
2137 m_objectFilteredPoints = trainPtsTmp;
2138 m_queryFilteredKeyPoints = queryKptsTmp;
2139 } else {
2140 m_filteredMatches = m;
2141 m_objectFilteredPoints = trainPts;
2142 m_queryFilteredKeyPoints = queryKpts;
2143 }
2144}
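// Illustrative sketch of the ratio-test core used above, as a standalone
// OpenCV helper: a knn match is kept when the best distance is clearly
// smaller than the second-best one, which rejects ambiguous matches.
//
// std::vector<cv::DMatch> ratioFilter(const std::vector<std::vector<cv::DMatch> > &knn,
//                                     float ratioThreshold = 0.85f)
// {
//   std::vector<cv::DMatch> kept;
//   for (size_t i = 0; i < knn.size(); i++) {
//     if (knn[i].size() >= 2 && knn[i][0].distance < ratioThreshold * knn[i][1].distance) {
//       kept.push_back(knn[i][0]);
//     }
//   }
//   return kept;
// }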
2145
2153void vpKeyPoint::getObjectPoints(std::vector<cv::Point3f> &objectPoints) const
2154{
2155 objectPoints = m_objectFilteredPoints;
2156}
2157
2165void vpKeyPoint::getObjectPoints(std::vector<vpPoint> &objectPoints) const
2166{
2167 vpConvert::convertFromOpenCV(m_objectFilteredPoints, objectPoints);
2168}
2169
2178void vpKeyPoint::getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches) const
2179{
2180 if (matches) {
2181 keyPoints = m_queryFilteredKeyPoints;
2182 } else {
2183 keyPoints = m_queryKeyPoints;
2184 }
2185}
2186
2195void vpKeyPoint::getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches) const
2196{
2197 if (matches) {
2198 keyPoints = currentImagePointsList;
2199 } else {
2200 vpConvert::convertFromOpenCV(m_queryKeyPoints, keyPoints);
2201 }
2202}
2203
2209void vpKeyPoint::getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const { keyPoints = m_trainKeyPoints; }
2210
2216void vpKeyPoint::getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const { keyPoints = referenceImagePointsList; }
2217
2224void vpKeyPoint::getTrainPoints(std::vector<cv::Point3f> &points) const { points = m_trainPoints; }
2225
2232void vpKeyPoint::getTrainPoints(std::vector<vpPoint> &points) const { points = m_trainVpPoints; }
2233
2238void vpKeyPoint::init()
2239{
2240// Require 2.4.0 <= opencv < 3.0.0
2241#if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
2242 // The following line must be called in order to use SIFT or SURF
2243 if (!cv::initModule_nonfree()) {
2244 std::cerr << "Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
2245 }
2246#endif
2247
2248 // Use k-nearest neighbors (knn) to retrieve the two best matches for a
2249 // keypoint, so this is useful only for the ratio distance threshold methods
2250 if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
2251 m_useKnn = true;
2252 }
2253
2254 initDetectors(m_detectorNames);
2255 initExtractors(m_extractorNames);
2256 initMatcher(m_matcherName);
2257}
2258
2264void vpKeyPoint::initDetector(const std::string &detectorName)
2265{
2266#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2267 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
2268
2269 if (m_detectors[detectorName] == NULL) {
2270 std::stringstream ss_msg;
2271 ss_msg << "Failed to initialize the detector: " << detectorName
2272 << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2273 throw vpException(vpException::fatalError, ss_msg.str());
2274 }
2275#else
2276 std::string detectorNameTmp = detectorName;
2277 std::string pyramid = "Pyramid";
2278 std::size_t pos = detectorName.find(pyramid);
2279 bool usePyramid = false;
2280 if (pos != std::string::npos) {
2281 detectorNameTmp = detectorName.substr(pos + pyramid.size());
2282 usePyramid = true;
2283 }
2284
2285 if (detectorNameTmp == "SIFT") {
2286#if (VISP_HAVE_OPENCV_VERSION >= 0x040500) // OpenCV >= 4.5.0
2287 cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
2288 if (!usePyramid) {
2289 m_detectors[detectorNameTmp] = siftDetector;
2290 } else {
2291 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2292 }
2293#else
2294#if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
2295 (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2296 // SIFT is no longer patented since 09/03/2020
2297 cv::Ptr<cv::FeatureDetector> siftDetector;
2298 if (m_maxFeatures > 0) {
2299#if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2300 siftDetector = cv::SIFT::create(m_maxFeatures);
2301#else
2302 siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
2303#endif
2304 } else {
2305#if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2306 siftDetector = cv::SIFT::create();
2307#else
2308 siftDetector = cv::xfeatures2d::SIFT::create();
2309#endif
2310 }
2311 if (!usePyramid) {
2312 m_detectors[detectorNameTmp] = siftDetector;
2313 } else {
2314 std::cerr << "You should not use SIFT with Pyramid feature detection!" << std::endl;
2315 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2316 }
2317#else
2318 std::stringstream ss_msg;
2319 ss_msg << "Failed to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2320 << " was not built with the xFeatures2d module.";
2321 throw vpException(vpException::fatalError, ss_msg.str());
2322#endif
2323#endif
2324 } else if (detectorNameTmp == "SURF") {
2325#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2326 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
2327 if (!usePyramid) {
2328 m_detectors[detectorNameTmp] = surfDetector;
2329 } else {
2330 std::cerr << "You should not use SURF with Pyramid feature detection!" << std::endl;
2331 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
2332 }
2333#else
2334 std::stringstream ss_msg;
2335 ss_msg << "Failed to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2336 << " was not built with the xFeatures2d module.";
2337 throw vpException(vpException::fatalError, ss_msg.str());
2338#endif
2339 } else if (detectorNameTmp == "FAST") {
2340 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
2341 if (!usePyramid) {
2342 m_detectors[detectorNameTmp] = fastDetector;
2343 } else {
2344 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2345 }
2346 } else if (detectorNameTmp == "MSER") {
2347 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
2348 if (!usePyramid) {
2349 m_detectors[detectorNameTmp] = fastDetector;
2350 } else {
2351 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2352 }
2353 } else if (detectorNameTmp == "ORB") {
2354 cv::Ptr<cv::FeatureDetector> orbDetector;
2355 if (m_maxFeatures > 0) {
2356 orbDetector = cv::ORB::create(m_maxFeatures);
2357 } else {
2358 orbDetector = cv::ORB::create();
2359 }
2360 if (!usePyramid) {
2361 m_detectors[detectorNameTmp] = orbDetector;
2362 } else {
2363 std::cerr << "You should not use ORB with Pyramid feature detection!" << std::endl;
2364 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
2365 }
2366 } else if (detectorNameTmp == "BRISK") {
2367 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
2368 if (!usePyramid) {
2369 m_detectors[detectorNameTmp] = briskDetector;
2370 } else {
2371 std::cerr << "You should not use BRISK with Pyramid feature detection!" << std::endl;
2372 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
2373 }
2374 } else if (detectorNameTmp == "KAZE") {
2375 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
2376 if (!usePyramid) {
2377 m_detectors[detectorNameTmp] = kazeDetector;
2378 } else {
2379 std::cerr << "You should not use KAZE with Pyramid feature detection!" << std::endl;
2380 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2381 }
2382 } else if (detectorNameTmp == "AKAZE") {
2383 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2384 if (!usePyramid) {
2385 m_detectors[detectorNameTmp] = akazeDetector;
2386 } else {
2387 std::cerr << "You should not use AKAZE with Pyramid feature detection!" << std::endl;
2388 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
2389 }
2390 } else if (detectorNameTmp == "GFTT") {
2391 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2392 if (!usePyramid) {
2393 m_detectors[detectorNameTmp] = gfttDetector;
2394 } else {
2395 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2396 }
2397 } else if (detectorNameTmp == "SimpleBlob") {
2398 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2399 if (!usePyramid) {
2400 m_detectors[detectorNameTmp] = simpleBlobDetector;
2401 } else {
2402 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
2403 }
2404 } else if (detectorNameTmp == "STAR") {
2405#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2406 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2407 if (!usePyramid) {
2408 m_detectors[detectorNameTmp] = starDetector;
2409 } else {
2410 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2411 }
2412#else
2413 std::stringstream ss_msg;
2414 ss_msg << "Failed to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2415 << " was not built with the xFeatures2d module.";
2416 throw vpException(vpException::fatalError, ss_msg.str());
2417#endif
2418 } else if (detectorNameTmp == "AGAST") {
2419 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2420 if (!usePyramid) {
2421 m_detectors[detectorNameTmp] = agastDetector;
2422 } else {
2423 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2424 }
2425 } else if (detectorNameTmp == "MSD") {
2426#if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
2427#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2428 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2429 if (!usePyramid) {
2430 m_detectors[detectorNameTmp] = msdDetector;
2431 } else {
2432 std::cerr << "You should not use MSD with Pyramid feature detection!" << std::endl;
2433 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2434 }
2435#else
2436 std::stringstream ss_msg;
2437 ss_msg << "Failed to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2438 << " was not built with the xFeatures2d module.";
2439 throw vpException(vpException::fatalError, ss_msg.str());
2440#endif
2441#else
2442 std::stringstream ss_msg;
2443 ss_msg << "Feature " << detectorName << " is not available in OpenCV version: " << std::hex
2444 << VISP_HAVE_OPENCV_VERSION << " (requires OpenCV >= 3.1).";
2445#endif
2446 } else {
2447 std::cerr << "The detector: " << detectorNameTmp << " is not available." << std::endl;
2448 }
2449
2450 bool detectorInitialized = false;
2451 if (!usePyramid) {
2452 // if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
2453 detectorInitialized = !m_detectors[detectorNameTmp].empty();
2454 } else {
2455 // if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
2456 detectorInitialized = !m_detectors[detectorName].empty();
2457 }
2458
2459 if (!detectorInitialized) {
2460 std::stringstream ss_msg;
2461 ss_msg << "Failed to initialize the detector: " << detectorNameTmp
2462 << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2463 throw vpException(vpException::fatalError, ss_msg.str());
2464 }
2465#endif
2466}
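// Illustrative sketch: detector names are resolved by the factory above, and
// a "Pyramid" prefix wraps the detector in a scale pyramid (OpenCV >= 3 code
// path). The chosen names are assumptions.
//
// vpKeyPoint keypoint;
// keypoint.setDetector("PyramidFAST"); // FAST run on an image pyramid
// keypoint.setDetector("ORB");         // plain ORB detector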
2467
2474void vpKeyPoint::initDetectors(const std::vector<std::string> &detectorNames)
2475{
2476 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
2477 initDetector(*it);
2478 }
2479}
2480
2486void vpKeyPoint::initExtractor(const std::string &extractorName)
2487{
2488#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2489 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
2490#else
2491 if (extractorName == "SIFT") {
2492#if (VISP_HAVE_OPENCV_VERSION >= 0x040500) // OpenCV >= 4.5.0
2493 m_extractors[extractorName] = cv::SIFT::create();
2494#else
2495#if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
2496 (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2497 // SIFT is no longer patented since 09/03/2020
2498#if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2499 m_extractors[extractorName] = cv::SIFT::create();
2500#else
2501 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2502#endif
2503#else
2504 std::stringstream ss_msg;
2505 ss_msg << "Failed to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2506 << " was not built with the xFeatures2d module.";
2507 throw vpException(vpException::fatalError, ss_msg.str());
2508#endif
2509#endif
2510 } else if (extractorName == "SURF") {
2511#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2512 // Use extended set of SURF descriptors (128 instead of 64)
2513 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3, true);
2514#else
2515 std::stringstream ss_msg;
2516 ss_msg << "Failed to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2517 << " was not built with the xFeatures2d module.";
2518 throw vpException(vpException::fatalError, ss_msg.str());
2519#endif
2520 } else if (extractorName == "ORB") {
2521 m_extractors[extractorName] = cv::ORB::create();
2522 } else if (extractorName == "BRISK") {
2523 m_extractors[extractorName] = cv::BRISK::create();
2524 } else if (extractorName == "FREAK") {
2525#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2526 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2527#else
2528 std::stringstream ss_msg;
2529 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2530 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2531 throw vpException(vpException::fatalError, ss_msg.str());
2532#endif
2533 } else if (extractorName == "BRIEF") {
2534#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2535 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2536#else
2537 std::stringstream ss_msg;
2538 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2539 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2540 throw vpException(vpException::fatalError, ss_msg.str());
2541#endif
2542 } else if (extractorName == "KAZE") {
2543 m_extractors[extractorName] = cv::KAZE::create();
2544 } else if (extractorName == "AKAZE") {
2545 m_extractors[extractorName] = cv::AKAZE::create();
2546 } else if (extractorName == "DAISY") {
2547#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2548 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2549#else
2550 std::stringstream ss_msg;
2551 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2552 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2553 throw vpException(vpException::fatalError, ss_msg.str());
2554#endif
2555 } else if (extractorName == "LATCH") {
2556#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2557 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2558#else
2559 std::stringstream ss_msg;
2560 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2561 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2562 throw vpException(vpException::fatalError, ss_msg.str());
2563#endif
2564 } else if (extractorName == "LUCID") {
2565#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2566 // m_extractors[extractorName] = cv::xfeatures2d::LUCID::create(1, 2);
2567 // Not possible currently, need a color image
2568 throw vpException(vpException::badValue, "Not possible currently as it needs a color image.");
2569#else
2570 std::stringstream ss_msg;
2571 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2572 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2573 throw vpException(vpException::fatalError, ss_msg.str());
2574#endif
2575 } else if (extractorName == "VGG") {
2576#if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2577#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2578 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2579#else
2580 std::stringstream ss_msg;
2581 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2582 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2583 throw vpException(vpException::fatalError, ss_msg.str());
2584#endif
2585#else
2586 std::stringstream ss_msg;
2587 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version is " << std::hex
2588 << VISP_HAVE_OPENCV_VERSION << " but at least OpenCV 3.2 is required.";
2589 throw vpException(vpException::fatalError, ss_msg.str());
2590#endif
2591 } else if (extractorName == "BoostDesc") {
2592#if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2593#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2594 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2595#else
2596 std::stringstream ss_msg;
2597 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2598 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2599 throw vpException(vpException::fatalError, ss_msg.str());
2600#endif
2601#else
2602 std::stringstream ss_msg;
2603 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version is " << std::hex
2604 << VISP_HAVE_OPENCV_VERSION << " but at least OpenCV 3.2 is required.";
2605 throw vpException(vpException::fatalError, ss_msg.str());
2606#endif
2607 } else {
2608 std::cerr << "The extractor: " << extractorName << " is not available." << std::endl;
2609 }
2610#endif
2611
2612 if (!m_extractors[extractorName]) { // if null
2613 std::stringstream ss_msg;
2614 ss_msg << "Failed to initialize the extractor: " << extractorName
2615 << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2616 throw vpException(vpException::fatalError, ss_msg.str());
2617 }
2618
2619#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2620 if (extractorName == "SURF") {
2621 // Use extended set of SURF descriptors (128 instead of 64)
2622 m_extractors[extractorName]->set("extended", 1);
2623 }
2624#endif
2625}
2626
2633void vpKeyPoint::initExtractors(const std::vector<std::string> &extractorNames)
2634{
2635 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
2636 initExtractor(*it);
2637 }
2638
2639 int descriptorType = CV_32F;
2640 bool firstIteration = true;
2641 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2642 it != m_extractors.end(); ++it) {
2643 if (firstIteration) {
2644 firstIteration = false;
2645 descriptorType = it->second->descriptorType();
2646 } else {
2647 if (descriptorType != it->second->descriptorType()) {
2648 throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2649 }
2650 }
2651 }
2652}
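// Illustrative sketch: mixing descriptor types is rejected by the check
// above. The extractor names are assumptions.
//
// vpKeyPoint keypoint;
// std::vector<std::string> names;
// names.push_back("ORB");  // binary descriptors (CV_8U)
// names.push_back("SIFT"); // floating-point descriptors (CV_32F)
// keypoint.setExtractors(names); // throws: all descriptors must have the same type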
2653
2654void vpKeyPoint::initFeatureNames()
2655{
2656// Create map enum to string
2657#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2658 m_mapOfDetectorNames[DETECTOR_FAST] = "FAST";
2659 m_mapOfDetectorNames[DETECTOR_MSER] = "MSER";
2660 m_mapOfDetectorNames[DETECTOR_ORB] = "ORB";
2661 m_mapOfDetectorNames[DETECTOR_BRISK] = "BRISK";
2662 m_mapOfDetectorNames[DETECTOR_GFTT] = "GFTT";
2663 m_mapOfDetectorNames[DETECTOR_SimpleBlob] = "SimpleBlob";
2664#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2665 m_mapOfDetectorNames[DETECTOR_STAR] = "STAR";
2666#endif
2667#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2668 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2669 m_mapOfDetectorNames[DETECTOR_SIFT] = "SIFT";
2670#endif
2671#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2672 m_mapOfDetectorNames[DETECTOR_SURF] = "SURF";
2673#endif
2674#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2675 m_mapOfDetectorNames[DETECTOR_KAZE] = "KAZE";
2676 m_mapOfDetectorNames[DETECTOR_AKAZE] = "AKAZE";
2677 m_mapOfDetectorNames[DETECTOR_AGAST] = "AGAST";
2678#endif
2679#if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2680 m_mapOfDetectorNames[DETECTOR_MSD] = "MSD";
2681#endif
2682#endif
2683
2684#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2685 m_mapOfDescriptorNames[DESCRIPTOR_ORB] = "ORB";
2686 m_mapOfDescriptorNames[DESCRIPTOR_BRISK] = "BRISK";
2687#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2688 m_mapOfDescriptorNames[DESCRIPTOR_FREAK] = "FREAK";
2689 m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] = "BRIEF";
2690#endif
2691#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2692 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2693 m_mapOfDescriptorNames[DESCRIPTOR_SIFT] = "SIFT";
2694#endif
2695#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2696 m_mapOfDescriptorNames[DESCRIPTOR_SURF] = "SURF";
2697#endif
2698#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2699 m_mapOfDescriptorNames[DESCRIPTOR_KAZE] = "KAZE";
2700 m_mapOfDescriptorNames[DESCRIPTOR_AKAZE] = "AKAZE";
2701#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2702 m_mapOfDescriptorNames[DESCRIPTOR_DAISY] = "DAISY";
2703 m_mapOfDescriptorNames[DESCRIPTOR_LATCH] = "LATCH";
2704#endif
2705#endif
2706#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2707 m_mapOfDescriptorNames[DESCRIPTOR_VGG] = "VGG";
2708 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] = "BoostDesc";
2709#endif
2710#endif
2711}
2712
2718void vpKeyPoint::initMatcher(const std::string &matcherName)
2719{
2720 int descriptorType = CV_32F;
2721 bool firstIteration = true;
2722 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2723 it != m_extractors.end(); ++it) {
2724 if (firstIteration) {
2725 firstIteration = false;
2726 descriptorType = it->second->descriptorType();
2727 } else {
2728 if (descriptorType != it->second->descriptorType()) {
2729 throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2730 }
2731 }
2732 }
2733
2734 if (matcherName == "FlannBased") {
2735 if (m_extractors.empty()) {
2736 std::cout << "Warning: No extractor initialized, using floating-point "
2737 "values (CV_32F) "
2738 "for the descriptor type by default!"
2739 << std::endl;
2740 }
2741
2742 if (descriptorType == CV_8U) {
2743#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2744 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2745#else
2746 m_matcher = new cv::FlannBasedMatcher(new cv::flann::LshIndexParams(12, 20, 2));
2747#endif
2748 } else {
2749#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2750 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2751#else
2752 m_matcher = new cv::FlannBasedMatcher(new cv::flann::KDTreeIndexParams());
2753#endif
2754 }
2755 } else {
2756 m_matcher = cv::DescriptorMatcher::create(matcherName);
2757 }
2758
2759#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2760 if (m_matcher != NULL && !m_useKnn && matcherName == "BruteForce") {
2761 m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
2762 }
2763#endif
2764
2765 if (!m_matcher) { // if null
2766 std::stringstream ss_msg;
2767 ss_msg << "Failed to initialize the matcher: " << matcherName
2768 << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2769 throw vpException(vpException::fatalError, ss_msg.str());
2770 }
2771}
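// Illustrative sketch: with the "FlannBased" matcher above, binary
// descriptors (CV_8U, e.g. ORB or BRISK) are indexed with LSH while
// floating-point descriptors (e.g. SIFT) use a KD-tree. Selecting it only
// requires the matcher name; the detector/extractor pair is an assumption.
//
// vpKeyPoint keypoint("ORB", "ORB", "FlannBased"); // LSH index is selected
// keypoint.setMatcher("BruteForce-Hamming");       // switch matcher later on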
2772
2781void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
2782 vpImage<unsigned char> &IMatching)
2783{
2784 vpImagePoint topLeftCorner(0, 0);
2785 IMatching.insert(IRef, topLeftCorner);
2786 topLeftCorner = vpImagePoint(0, IRef.getWidth());
2787 IMatching.insert(ICurrent, topLeftCorner);
2788}
2789
2798void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent,
2799 vpImage<vpRGBa> &IMatching)
2800{
2801 vpImagePoint topLeftCorner(0, 0);
2802 IMatching.insert(IRef, topLeftCorner);
2803 topLeftCorner = vpImagePoint(0, IRef.getWidth());
2804 IMatching.insert(ICurrent, topLeftCorner);
2805}
2806
2814void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
2815{
2816 // Number of images in the training database + the current image in
2817 // which we want to detect the object
2818 int nbImg = (int)(m_mapOfImages.size() + 1);
2819
2820 if (m_mapOfImages.empty()) {
2821 std::cerr << "There is no training image loaded!" << std::endl;
2822 return;
2823 }
2824
2825 if (nbImg == 2) {
2826 // Only one training image, so we display them side by side
2827 insertImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
2828 } else {
2829 // Multiple training images, display them as a mosaic image
2830 int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2831 int nbWidth = nbImgSqrt;
2832 int nbHeight = nbImgSqrt;
2833
2834 if (nbImgSqrt * nbImgSqrt < nbImg) {
2835 nbWidth++;
2836 }
2837
2838 unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2839 for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2840 ++it) {
2841 if (maxW < it->second.getWidth()) {
2842 maxW = it->second.getWidth();
2843 }
2844
2845 if (maxH < it->second.getHeight()) {
2846 maxH = it->second.getHeight();
2847 }
2848 }
2849
2850 // Indexes of the current image in the grid, computed so that the
2851 // image lies in the center square of the mosaic grid
2852 int medianI = nbHeight / 2;
2853 int medianJ = nbWidth / 2;
2854 int medianIndex = medianI * nbWidth + medianJ;
2855
2856 int cpt = 0;
2857 for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2858 ++it, cpt++) {
2859 int local_cpt = cpt;
2860 if (cpt >= medianIndex) {
2861 // Shift by one the index of the training images located after
2862 // the current image
2863 local_cpt++;
2864 }
2865 int indexI = local_cpt / nbWidth;
2866 int indexJ = local_cpt - (indexI * nbWidth);
2867 vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2868
2869 IMatching.insert(it->second, topLeftCorner);
2870 }
2871
2872 vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2873 IMatching.insert(ICurrent, topLeftCorner);
2874 }
2875}
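// Worked example of the mosaic layout above: with 5 training images,
// nbImg = 6, so nbImgSqrt = round(sqrt(6)) = 2 and, since 2 * 2 < 6, the grid
// becomes nbWidth = 3 by nbHeight = 2. Then medianI = 1, medianJ = 1 and
// medianIndex = 4: cells 0-3 receive the first four training images, cell 4
// (the center square) receives the current image, and the fifth training
// image is shifted to cell 5.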
2876
2884void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
2885{
2886 // Number of images in the training database + the current image in
2887 // which we want to detect the object
2888 int nbImg = (int)(m_mapOfImages.size() + 1);
2889
2890 if (m_mapOfImages.empty()) {
2891 std::cerr << "There is no training image loaded!" << std::endl;
2892 return;
2893 }
2894
2895 if (nbImg == 2) {
2896 // Only one training image, so we display them side by side
2897 vpImage<vpRGBa> IRef;
2898 vpImageConvert::convert(m_mapOfImages.begin()->second, IRef);
2899 insertImageMatching(IRef, ICurrent, IMatching);
2900 } else {
2901 // Multiple training images, display them as a mosaic image
2902 int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2903 int nbWidth = nbImgSqrt;
2904 int nbHeight = nbImgSqrt;
2905
2906 if (nbImgSqrt * nbImgSqrt < nbImg) {
2907 nbWidth++;
2908 }
2909
2910 unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2911 for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2912 ++it) {
2913 if (maxW < it->second.getWidth()) {
2914 maxW = it->second.getWidth();
2915 }
2916
2917 if (maxH < it->second.getHeight()) {
2918 maxH = it->second.getHeight();
2919 }
2920 }
2921
2922 // Indexes of the current image in the grid, computed so that the
2923 // image lies in the center square of the mosaic grid
2924 int medianI = nbHeight / 2;
2925 int medianJ = nbWidth / 2;
2926 int medianIndex = medianI * nbWidth + medianJ;
2927
2928 int cpt = 0;
2929 for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2930 ++it, cpt++) {
2931 int local_cpt = cpt;
2932 if (cpt >= medianIndex) {
2933 // Shift by one the index of the training images located after
2934 // the current image
2935 local_cpt++;
2936 }
2937 int indexI = local_cpt / nbWidth;
2938 int indexJ = local_cpt - (indexI * nbWidth);
2939 vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2940
2941 vpImage<vpRGBa> IRef;
2942 vpImageConvert::convert(it->second, IRef);
2943 IMatching.insert(IRef, topLeftCorner);
2944 }
2945
2946 vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2947 IMatching.insert(ICurrent, topLeftCorner);
2948 }
2949}
2950
2956void vpKeyPoint::loadConfigFile(const std::string &configFile)
2957{
2958 vpXmlConfigParserKeyPoint xmlp;
2959
2960 try {
2961 // Reset detector and extractor
2962 m_detectorNames.clear();
2963 m_extractorNames.clear();
2964 m_detectors.clear();
2965 m_extractors.clear();
2966
2967 std::cout << " *********** Parsing XML for configuration for vpKeyPoint "
2968 "************ "
2969 << std::endl;
2970 xmlp.parse(configFile);
2971
2972 m_detectorNames.push_back(xmlp.getDetectorName());
2973 m_extractorNames.push_back(xmlp.getExtractorName());
2974 m_matcherName = xmlp.getMatcherName();
2975
2976 switch (xmlp.getMatchingMethod()) {
2977 case vpXmlConfigParserKeyPoint::constantFactorDistanceThreshold:
2978 m_filterType = constantFactorDistanceThreshold;
2979 break;
2980
2981 case vpXmlConfigParserKeyPoint::stdDistanceThreshold:
2982 m_filterType = stdDistanceThreshold;
2983 break;
2984
2985 case vpXmlConfigParserKeyPoint::ratioDistanceThreshold:
2986 m_filterType = ratioDistanceThreshold;
2987 break;
2988
2989 case vpXmlConfigParserKeyPoint::stdAndRatioDistanceThreshold:
2990 m_filterType = stdAndRatioDistanceThreshold;
2991 break;
2992
2993 case vpXmlConfigParserKeyPoint::noFilterMatching:
2994 m_filterType = noFilterMatching;
2995 break;
2996
2997 default:
2998 break;
2999 }
3000
3001 m_matchingFactorThreshold = xmlp.getMatchingFactorThreshold();
3002 m_matchingRatioThreshold = xmlp.getMatchingRatioThreshold();
3003
3004 m_useRansacVVS = xmlp.getUseRansacVVSPoseEstimation();
3005 m_useConsensusPercentage = xmlp.getUseRansacConsensusPercentage();
3006 m_nbRansacIterations = xmlp.getNbRansacIterations();
3007 m_ransacReprojectionError = xmlp.getRansacReprojectionError();
3008 m_nbRansacMinInlierCount = xmlp.getNbRansacMinInlierCount();
3009 m_ransacThreshold = xmlp.getRansacThreshold();
3010 m_ransacConsensusPercentage = xmlp.getRansacConsensusPercentage();
3011
3012 if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
3013 m_useKnn = true;
3014 } else {
3015 m_useKnn = false;
3016 }
3017
3018 init();
3019 } catch (...) {
3020 throw vpException(vpException::ioError, "Can't open XML file \"%s\"\n ", configFile.c_str());
3021 }
3022}
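// Illustrative sketch: loading the detector/extractor/matcher and RANSAC
// settings from an XML file; the file name is an assumption.
//
// vpKeyPoint keypoint;
// keypoint.loadConfigFile("keypoint-config.xml"); // calls init() internally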
3023
3032void vpKeyPoint::loadLearningData(const std::string &filename, bool binaryMode, bool append)
3033{
3034 int startClassId = 0;
3035 int startImageId = 0;
3036 if (!append) {
3037 m_trainKeyPoints.clear();
3038 m_trainPoints.clear();
3039 m_mapOfImageId.clear();
3040 m_mapOfImages.clear();
3041 } else {
3042 // In append case, find the max index of the keypoint class Ids
3043 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
3044 if (startClassId < it->first) {
3045 startClassId = it->first;
3046 }
3047 }
3048
3049 // In append case, find the max index of the image Ids
3050 for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3051 ++it) {
3052 if (startImageId < it->first) {
3053 startImageId = it->first;
3054 }
3055 }
3056 }
3057
3058 // Get parent directory
3059 std::string parent = vpIoTools::getParent(filename);
3060 if (!parent.empty()) {
3061 parent += "/";
3062 }
3063
3064 if (binaryMode) {
3065 std::ifstream file(filename.c_str(), std::ifstream::binary);
3066 if (!file.is_open()) {
3067 throw vpException(vpException::ioError, "Cannot open the file.");
3068 }
3069
3070 // Read info about training images
3071 int nbImgs = 0;
3072 vpIoTools::readBinaryValueLE(file, nbImgs);
3073
3074#if !defined(VISP_HAVE_MODULE_IO)
3075 if (nbImgs > 0) {
3076 std::cout << "Warning: The learning file contains image data that will "
3077 "not be loaded as visp_io module "
3078 "is not available !"
3079 << std::endl;
3080 }
3081#endif
3082
3083 for (int i = 0; i < nbImgs; i++) {
3084 // Read image_id
3085 int id = 0;
3086 vpIoTools::readBinaryValueLE(file, id);
3087
3088 int length = 0;
3089 vpIoTools::readBinaryValueLE(file, length);
3090 // Will contain the path to the training images
3091 char *path = new char[length + 1]; // char path[length + 1];
3092
3093 for (int cpt = 0; cpt < length; cpt++) {
3094 char c;
3095 file.read((char *)(&c), sizeof(c));
3096 path[cpt] = c;
3097 }
3098 path[length] = '\0';
3099
3100 vpImage<unsigned char> I;
3101#ifdef VISP_HAVE_MODULE_IO
3102 if (vpIoTools::isAbsolutePathname(std::string(path))) {
3103 vpImageIo::read(I, path);
3104 } else {
3105 vpImageIo::read(I, parent + path);
3106 }
3107
3108 // Add the image previously loaded only if VISP_HAVE_MODULE_IO
3109 m_mapOfImages[id + startImageId] = I;
3110#endif
3111
3112 // Delete path
3113 delete[] path;
3114 }
3115
3116 // Read whether 3D point information is saved or not
3117 int have3DInfoInt = 0;
3118 vpIoTools::readBinaryValueLE(file, have3DInfoInt);
3119 bool have3DInfo = have3DInfoInt != 0;
3120
3121 // Read the number of descriptors
3122 int nRows = 0;
3123 vpIoTools::readBinaryValueLE(file, nRows);
3124
3125 // Read the size of the descriptor
3126 int nCols = 0;
3127 vpIoTools::readBinaryValueLE(file, nCols);
3128
3129 // Read the type of the descriptor
3130 int descriptorType = 5; // CV_32F
3131 vpIoTools::readBinaryValueLE(file, descriptorType);
3132
3133 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3134 for (int i = 0; i < nRows; i++) {
3135 // Read information about keyPoint
3136 float u, v, size, angle, response;
3137 int octave, class_id, image_id;
3138 vpIoTools::readBinaryValueLE(file, u);
3139 vpIoTools::readBinaryValueLE(file, v);
3140 vpIoTools::readBinaryValueLE(file, size);
3141 vpIoTools::readBinaryValueLE(file, angle);
3142 vpIoTools::readBinaryValueLE(file, response);
3143 vpIoTools::readBinaryValueLE(file, octave);
3144 vpIoTools::readBinaryValueLE(file, class_id);
3145 vpIoTools::readBinaryValueLE(file, image_id);
3146 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
3147 m_trainKeyPoints.push_back(keyPoint);
3148
3149 if (image_id != -1) {
3150#ifdef VISP_HAVE_MODULE_IO
3151 // No training images if image_id == -1
3152 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3153#endif
3154 }
3155
3156 if (have3DInfo) {
3157 // Read oX, oY, oZ
3158 float oX, oY, oZ;
3159 vpIoTools::readBinaryValueLE(file, oX);
3160 vpIoTools::readBinaryValueLE(file, oY);
3161 vpIoTools::readBinaryValueLE(file, oZ);
3162 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
3163 }
3164
3165 for (int j = 0; j < nCols; j++) {
3166 // Read the value of the descriptor
3167 switch (descriptorType) {
3168 case CV_8U: {
3169 unsigned char value;
3170 file.read((char *)(&value), sizeof(value));
3171 trainDescriptorsTmp.at<unsigned char>(i, j) = value;
3172 } break;
3173
3174 case CV_8S: {
3175 char value;
3176 file.read((char *)(&value), sizeof(value));
3177 trainDescriptorsTmp.at<char>(i, j) = value;
3178 } break;
3179
3180 case CV_16U: {
3181 unsigned short int value;
3182 vpIoTools::readBinaryValueLE(file, value);
3183 trainDescriptorsTmp.at<unsigned short int>(i, j) = value;
3184 } break;
3185
3186 case CV_16S: {
3187 short int value;
3188 vpIoTools::readBinaryValueLE(file, value);
3189 trainDescriptorsTmp.at<short int>(i, j) = value;
3190 } break;
3191
3192 case CV_32S: {
3193 int value;
3194 vpIoTools::readBinaryValueLE(file, value);
3195 trainDescriptorsTmp.at<int>(i, j) = value;
3196 } break;
3197
3198 case CV_32F: {
3199 float value;
3200 vpIoTools::readBinaryValueLE(file, value);
3201 trainDescriptorsTmp.at<float>(i, j) = value;
3202 } break;
3203
3204 case CV_64F: {
3205 double value;
3206 vpIoTools::readBinaryValueLE(file, value);
3207 trainDescriptorsTmp.at<double>(i, j) = value;
3208 } break;
3209
3210 default: {
3211 float value;
3212 vpIoTools::readBinaryValueLE(file, value);
3213 trainDescriptorsTmp.at<float>(i, j) = value;
3214 } break;
3215 }
3216 }
3217 }
3218
3219 if (!append || m_trainDescriptors.empty()) {
3220 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3221 } else {
3222 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3223 }
3224
3225 file.close();
3226 } else {
3227 pugi::xml_document doc;
3228
3229 /* Parse the file and get the DOM */
3230 if (!doc.load_file(filename.c_str())) {
3231 throw vpException(vpException::ioError, "Error with file: %s", filename.c_str());
3232 }
3233
3234 pugi::xml_node root_element = doc.document_element();
3235
3236 int descriptorType = CV_32F; // float
3237 int nRows = 0, nCols = 0;
3238 int i = 0, j = 0;
3239
3240 cv::Mat trainDescriptorsTmp;
3241
3242 for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node;
3243 first_level_node = first_level_node.next_sibling()) {
3244
3245 std::string name(first_level_node.name());
3246 if (first_level_node.type() == pugi::node_element && name == "TrainingImageInfo") {
3247
3248 for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node;
3249 image_info_node = image_info_node.next_sibling()) {
3250 name = std::string(image_info_node.name());
3251
3252 if (name == "trainImg") {
3253 // Read image_id
3254 int id = image_info_node.attribute("image_id").as_int();
3255
3256 vpImage<unsigned char> I;
3257#ifdef VISP_HAVE_MODULE_IO
3258 std::string path(image_info_node.text().as_string());
3259 // Read path to the training images
3260 if (vpIoTools::isAbsolutePathname(std::string(path))) {
3261 vpImageIo::read(I, path);
3262 } else {
3263 vpImageIo::read(I, parent + path);
3264 }
3265
3266 // Add the image previously loaded only if VISP_HAVE_MODULE_IO
3267 m_mapOfImages[id + startImageId] = I;
3268#endif
3269 }
3270 }
3271 } else if (first_level_node.type() == pugi::node_element && name == "DescriptorsInfo") {
3272 for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
3273 descriptors_info_node = descriptors_info_node.next_sibling()) {
3274 if (descriptors_info_node.type() == pugi::node_element) {
3275 name = std::string(descriptors_info_node.name());
3276
3277 if (name == "nrows") {
3278 nRows = descriptors_info_node.text().as_int();
3279 } else if (name == "ncols") {
3280 nCols = descriptors_info_node.text().as_int();
3281 } else if (name == "type") {
3282 descriptorType = descriptors_info_node.text().as_int();
3283 }
3284 }
3285 }
3286
3287 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3288 } else if (first_level_node.type() == pugi::node_element && name == "DescriptorInfo") {
3289 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
3290 int octave = 0, class_id = 0, image_id = 0;
3291 double oX = 0.0, oY = 0.0, oZ = 0.0;
3292
3293 std::stringstream ss;
3294
3295 for (pugi::xml_node point_node = first_level_node.first_child(); point_node;
3296 point_node = point_node.next_sibling()) {
3297 if (point_node.type() == pugi::node_element) {
3298 name = std::string(point_node.name());
3299
3300 // Read information about keypoints
3301 if (name == "u") {
3302 u = point_node.text().as_double();
3303 } else if (name == "v") {
3304 v = point_node.text().as_double();
3305 } else if (name == "size") {
3306 size = point_node.text().as_double();
3307 } else if (name == "angle") {
3308 angle = point_node.text().as_double();
3309 } else if (name == "response") {
3310 response = point_node.text().as_double();
3311 } else if (name == "octave") {
3312 octave = point_node.text().as_int();
3313 } else if (name == "class_id") {
3314 class_id = point_node.text().as_int();
3315 cv::KeyPoint keyPoint(cv::Point2f((float)u, (float)v), (float)size, (float)angle, (float)response, octave,
3316 (class_id + startClassId));
3317 m_trainKeyPoints.push_back(keyPoint);
3318 } else if (name == "image_id") {
3319 image_id = point_node.text().as_int();
3320 if (image_id != -1) {
3321#ifdef VISP_HAVE_MODULE_IO
3322 // No training images if image_id == -1
3323 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3324#endif
3325 }
3326 } else if (name == "oX") {
3327 oX = point_node.text().as_double();
3328 } else if (name == "oY") {
3329 oY = point_node.text().as_double();
3330 } else if (name == "oZ") {
3331 oZ = point_node.text().as_double();
3332 m_trainPoints.push_back(cv::Point3f((float)oX, (float)oY, (float)oZ));
3333 } else if (name == "desc") {
3334 j = 0;
3335
3336 for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
3337 descriptor_value_node = descriptor_value_node.next_sibling()) {
3338
3339 if (descriptor_value_node.type() == pugi::node_element) {
3340 // Read descriptors values
3341 std::string parseStr(descriptor_value_node.text().as_string());
3342 ss.clear();
3343 ss.str(parseStr);
3344
3345 if (!ss.fail()) {
3346 switch (descriptorType) {
3347 case CV_8U: {
3348 // Parse the numeric value [0 ; 255] to an int
3349 int parseValue;
3350 ss >> parseValue;
3351 trainDescriptorsTmp.at<unsigned char>(i, j) = (unsigned char)parseValue;
3352 } break;
3353
3354 case CV_8S:
3355 // Parse the numeric value [-128 ; 127] to an int
3356 int parseValue;
3357 ss >> parseValue;
3358 trainDescriptorsTmp.at<char>(i, j) = (char)parseValue;
3359 break;
3360
3361 case CV_16U:
3362 ss >> trainDescriptorsTmp.at<unsigned short int>(i, j);
3363 break;
3364
3365 case CV_16S:
3366 ss >> trainDescriptorsTmp.at<short int>(i, j);
3367 break;
3368
3369 case CV_32S:
3370 ss >> trainDescriptorsTmp.at<int>(i, j);
3371 break;
3372
3373 case CV_32F:
3374 ss >> trainDescriptorsTmp.at<float>(i, j);
3375 break;
3376
3377 case CV_64F:
3378 ss >> trainDescriptorsTmp.at<double>(i, j);
3379 break;
3380
3381 default:
3382 ss >> trainDescriptorsTmp.at<float>(i, j);
3383 break;
3384 }
3385 } else {
3386 std::cerr << "Error when converting: " << ss.str() << std::endl;
3387 }
3388
3389 j++;
3390 }
3391 }
3392 }
3393 }
3394 }
3395 i++;
3396 }
3397 }
3398
3399 if (!append || m_trainDescriptors.empty()) {
3400 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3401 } else {
3402 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3403 }
3404 }
3405
3406 // Convert OpenCV type to ViSP type for compatibility
3407 vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePointsList);
3408 vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);
3409
3410 // Add train descriptors in matcher object
3411 m_matcher->clear();
3412 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
3413
3414 // Set _reference_computed to true as we load a learning file
3415 _reference_computed = true;
3416
3417 // Set m_currentImageId
3418 m_currentImageId = (int)m_mapOfImages.size();
3419}
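// Illustrative sketch: a typical save/load round-trip of the learning data
// handled above. File names are assumptions.
//
// vpKeyPoint learner;
// learner.buildReference(Iref);                    // Iref: training image
// learner.saveLearningData("learning.bin", true);  // true: binary format
//
// vpKeyPoint matcher_kp;                           // e.g. in another program
// matcher_kp.loadLearningData("learning.bin", true);
// matcher_kp.matchPoint(Icur);                     // ready to match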
3420
3429void vpKeyPoint::match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors,
3430 std::vector<cv::DMatch> &matches, double &elapsedTime)
3431{
3432 double t = vpTime::measureTimeMs();
3433
3434 if (m_useKnn) {
3435 m_knnMatches.clear();
3436
3437 if (m_useMatchTrainToQuery) {
3438 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
3439
3440 // Match train descriptors to query descriptors
3441 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3442 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
3443
3444 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3445 it1 != knnMatchesTmp.end(); ++it1) {
3446 std::vector<cv::DMatch> tmp;
3447 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
3448 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3449 }
3450 m_knnMatches.push_back(tmp);
3451 }
3452
3453 matches.resize(m_knnMatches.size());
3454 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3455 } else {
3456 // Match query descriptors to train descriptors
3457 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3458 matches.resize(m_knnMatches.size());
3459 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3460 }
3461 } else {
3462 matches.clear();
3463
3464 if (m_useMatchTrainToQuery) {
3465 std::vector<cv::DMatch> matchesTmp;
3466 // Match train descriptors to query descriptors
3467 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3468 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3469
3470 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3471 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3472 }
3473 } else {
3474 // Match query descriptors to train descriptors
3475 m_matcher->match(queryDescriptors, matches);
3476 }
3477 }
3478 elapsedTime = vpTime::measureTimeMs() - t;
3479}
3480
3488unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I) { return matchPoint(I, vpRect()); }
3489
3497unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color) { return matchPoint(I_color, vpRect()); }
3498
3509unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
3510 unsigned int width)
3511{
3512 return matchPoint(I, vpRect(iP, width, height));
3513}
3514
3525unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
3526 unsigned int width)
3527{
3528 return matchPoint(I_color, vpRect(iP, width, height));
3529}
3530
3539unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle)
3540{
3541 if (m_trainDescriptors.empty()) {
3542 std::cerr << "Reference is empty." << std::endl;
3543 if (!_reference_computed) {
3544 std::cerr << "Reference is not computed." << std::endl;
3545 }
3546 std::cerr << "Matching is not possible." << std::endl;
3547
3548 return 0;
3549 }
3550
3551 if (m_useAffineDetection) {
3552 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3553 std::vector<cv::Mat> listOfQueryDescriptors;
3554
3555 // Detect keypoints and extract descriptors on multiple images
3556 detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3557
3558 // Flatten the different query keypoint lists
3559 m_queryKeyPoints.clear();
3560 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3561 it != listOfQueryKeyPoints.end(); ++it) {
3562 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3563 }
3564
3565 bool first = true;
3566 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3567 ++it) {
3568 if (first) {
3569 first = false;
3570 it->copyTo(m_queryDescriptors);
3571 } else {
3572 m_queryDescriptors.push_back(*it);
3573 }
3574 }
3575 } else {
3576 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3577 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3578 }
3579
3580 return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3581}
3582
3591unsigned int vpKeyPoint::matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors)
3592{
3593 m_queryKeyPoints = queryKeyPoints;
3594 m_queryDescriptors = queryDescriptors;
3595
3596 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3597
3598 if (m_filterType != noFilterMatching) {
3599 m_queryFilteredKeyPoints.clear();
3600 m_objectFilteredPoints.clear();
3601 m_filteredMatches.clear();
3602
3603 filterMatches();
3604 } else {
3605 if (m_useMatchTrainToQuery) {
3606 // Keep only query keypoints matched with a train keypoint
3607 m_queryFilteredKeyPoints.clear();
3608 m_filteredMatches.clear();
3609 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3610 m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3611 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3612 }
3613 } else {
3614 m_queryFilteredKeyPoints = m_queryKeyPoints;
3615 m_filteredMatches = m_matches;
3616 }
3617
3618 if (!m_trainPoints.empty()) {
3619 m_objectFilteredPoints.clear();
3620      // Add 3D object points so that the same index in
3621      // m_queryFilteredKeyPoints and in m_objectFilteredPoints
3622      // refers to the same train object point
3623 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3624 // m_matches is normally ordered following the queryDescriptor index
3625 m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3626 }
3627 }
3628 }
3629
3630 // Convert OpenCV type to ViSP type for compatibility
3631 vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
3632  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
3633
3634 return static_cast<unsigned int>(m_filteredMatches.size());
3635}
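// This overload accepts keypoints and descriptors computed by the caller. A
// minimal sketch, assuming keypoint already holds a learned reference and
// OpenCV >= 3 provides the ORB detector:
//
//   cv::Ptr<cv::ORB> orb = cv::ORB::create();
//   std::vector<cv::KeyPoint> kpts;
//   cv::Mat desc;
//   orb->detectAndCompute(imgCv, cv::noArray(), kpts, desc);
//   unsigned int nbMatches = keypoint.matchPoint(kpts, desc);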
3636
3645unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
3646{
3647 vpImageConvert::convert(I_color, m_I);
3648 return matchPoint(m_I, rectangle);
3649}
3650
3663 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3664 bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3665{
3666 double error, elapsedTime;
3667 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3668}
3669
3682 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3683 bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3684{
3685 double error, elapsedTime;
3686 return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3687}
3688
3704 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3705 double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3706 const vpRect &rectangle)
3707{
3708 // Check if we have training descriptors
3709 if (m_trainDescriptors.empty()) {
3710 std::cerr << "Reference is empty." << std::endl;
3711 if (!_reference_computed) {
3712 std::cerr << "Reference is not computed." << std::endl;
3713 }
3714 std::cerr << "Matching is not possible." << std::endl;
3715
3716 return false;
3717 }
3718
3719 if (m_useAffineDetection) {
3720 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3721 std::vector<cv::Mat> listOfQueryDescriptors;
3722
3723 // Detect keypoints and extract descriptors on multiple images
3724 detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3725
3726    // Flatten the different query lists
3727 m_queryKeyPoints.clear();
3728 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3729 it != listOfQueryKeyPoints.end(); ++it) {
3730 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3731 }
3732
3733 bool first = true;
3734 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3735 ++it) {
3736 if (first) {
3737 first = false;
3738 it->copyTo(m_queryDescriptors);
3739 } else {
3740 m_queryDescriptors.push_back(*it);
3741 }
3742 }
3743 } else {
3744 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3745 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3746 }
3747
3748 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3749
3750 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3751
3752 if (m_filterType != noFilterMatching) {
3753 m_queryFilteredKeyPoints.clear();
3754 m_objectFilteredPoints.clear();
3755 m_filteredMatches.clear();
3756
3757 filterMatches();
3758 } else {
3759 if (m_useMatchTrainToQuery) {
3760      // Add only query keypoints matched with a train keypoint
3761 m_queryFilteredKeyPoints.clear();
3762 m_filteredMatches.clear();
3763 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3764 m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3765 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3766 }
3767 } else {
3768 m_queryFilteredKeyPoints = m_queryKeyPoints;
3769 m_filteredMatches = m_matches;
3770 }
3771
3772 if (!m_trainPoints.empty()) {
3773 m_objectFilteredPoints.clear();
3774      // Add 3D object points so that the same index in
3775      // m_queryFilteredKeyPoints and in m_objectFilteredPoints
3776      // refers to the same train object point
3777 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3778 // m_matches is normally ordered following the queryDescriptor index
3779 m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3780 }
3781 }
3782 }
3783
3784 // Convert OpenCV type to ViSP type for compatibility
3785 vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
3786  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
3787
3788  // error = std::numeric_limits<double>::max(); // generates a compilation error
3789  // under Windows where max is defined as a macro. To fix it we would have to add #undef max
3790 error = DBL_MAX;
3791 m_ransacInliers.clear();
3792 m_ransacOutliers.clear();
3793
3794 if (m_useRansacVVS) {
3795 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3796 size_t cpt = 0;
3797 // Create a list of vpPoint with 2D coordinates (current keypoint
3798 // location) + 3D coordinates (world/object coordinates)
3799 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3800 it != m_objectFilteredPoints.end(); ++it, cpt++) {
3801 vpPoint pt;
3802 pt.setWorldCoordinates(it->x, it->y, it->z);
3803
3804 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3805
3806 double x = 0.0, y = 0.0;
3807      vpPixelMeterConversion::convertPoint(cam, imP, x, y);
3808 pt.set_x(x);
3809 pt.set_y(y);
3810
3811 objectVpPoints[cpt] = pt;
3812 }
3813
3814 std::vector<vpPoint> inliers;
3815 std::vector<unsigned int> inlierIndex;
3816
3817 bool res = computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3818
3819 std::map<unsigned int, bool> mapOfInlierIndex;
3820 m_matchRansacKeyPointsToPoints.clear();
3821
3822 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3823 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3824 m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3825 mapOfInlierIndex[*it] = true;
3826 }
3827
3828 for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3829 if (mapOfInlierIndex.find((unsigned int)i) == mapOfInlierIndex.end()) {
3830 m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3831 }
3832 }
3833
3834 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3835
3836 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3837 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3838 m_ransacInliers.begin(), matchRansacToVpImage);
3839
3840 elapsedTime += m_poseTime;
3841
3842 return res;
3843 } else {
3844 std::vector<cv::Point2f> imageFilteredPoints;
3845 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3846 std::vector<int> inlierIndex;
3847 bool res = computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3848
3849 std::map<int, bool> mapOfInlierIndex;
3850 m_matchRansacKeyPointsToPoints.clear();
3851
3852 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3853 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3854 m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3855 mapOfInlierIndex[*it] = true;
3856 }
3857
3858 for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3859 if (mapOfInlierIndex.find((int)i) == mapOfInlierIndex.end()) {
3860 m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3861 }
3862 }
3863
3864 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3865
3866 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3867 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3868 m_ransacInliers.begin(), matchRansacToVpImage);
3869
3870 elapsedTime += m_poseTime;
3871
3872 return res;
3873 }
3874}
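// Minimal pose-estimation sketch, assuming cam holds the camera intrinsics,
// the reference was learned with 3D information, and the default func and
// rectangle arguments are used:
//
//   vpHomogeneousMatrix cMo;
//   double err = 0., elapsed = 0.;
//   if (keypoint.matchPoint(Icur, cam, cMo, err, elapsed)) {
//     // cMo holds the estimated pose, err the mean reprojection residual
//   }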
3875
3891 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3892 double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3893 const vpRect &rectangle)
3894{
3895 vpImageConvert::convert(I_color, m_I);
3896 return (matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3897}
3898
3919 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox,
3920 vpImagePoint &centerOfGravity, const bool isPlanarObject,
3921 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3922 double *meanDescriptorDistance, double *detection_score, const vpRect &rectangle)
3923{
3924 if (imPts1 != NULL && imPts2 != NULL) {
3925 imPts1->clear();
3926 imPts2->clear();
3927 }
3928
3929 matchPoint(I, rectangle);
3930
3931 double meanDescriptorDistanceTmp = 0.0;
3932 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3933 meanDescriptorDistanceTmp += (double)it->distance;
3934 }
3935
3936 meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3937 double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3938
3939 if (meanDescriptorDistance != NULL) {
3940 *meanDescriptorDistance = meanDescriptorDistanceTmp;
3941 }
3942 if (detection_score != NULL) {
3943 *detection_score = score;
3944 }
3945
3946 if (m_filteredMatches.size() >= 4) {
3947 // Training / Reference 2D points
3948 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3949 // Query / Current 2D points
3950 std::vector<cv::Point2f> points2(m_filteredMatches.size());
3951
3952 for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3953 points1[i] = cv::Point2f(m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx].pt);
3954 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx].pt);
3955 }
3956
3957 std::vector<vpImagePoint> inliers;
3958 if (isPlanarObject) {
3959#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3960 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3961#else
3962 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3963#endif
3964
3965 for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3966 // Compute reprojection error
3967 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3968 realPoint.at<double>(0, 0) = points1[i].x;
3969 realPoint.at<double>(1, 0) = points1[i].y;
3970        realPoint.at<double>(2, 0) = 1.0;
3971
3972 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3973 double err_x = (reprojectedPoint.at<double>(0, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].x;
3974 double err_y = (reprojectedPoint.at<double>(1, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].y;
3975 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3976
3977 if (reprojectionError < 6.0) {
3978 inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3979 if (imPts1 != NULL) {
3980 imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3981 }
3982
3983 if (imPts2 != NULL) {
3984 imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3985 }
3986 }
3987 }
3988 } else if (m_filteredMatches.size() >= 8) {
3989 cv::Mat fundamentalInliers;
3990 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3991
3992 for (size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3993 if (fundamentalInliers.at<uchar>((int)i, 0)) {
3994 inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3995
3996 if (imPts1 != NULL) {
3997 imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3998 }
3999
4000 if (imPts2 != NULL) {
4001 imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
4002 }
4003 }
4004 }
4005 }
4006
4007 if (!inliers.empty()) {
4008 // Build a polygon with the list of inlier keypoints detected in the
4009 // current image to get the bounding box
4010 vpPolygon polygon(inliers);
4011 boundingBox = polygon.getBoundingBox();
4012
4013 // Compute the center of gravity
4014 double meanU = 0.0, meanV = 0.0;
4015 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
4016 meanU += it->get_u();
4017 meanV += it->get_v();
4018 }
4019
4020 meanU /= (double)inliers.size();
4021 meanV /= (double)inliers.size();
4022
4023 centerOfGravity.set_u(meanU);
4024 centerOfGravity.set_v(meanV);
4025 }
4026 } else {
4027 // Too few matches
4028 return false;
4029 }
4030
4031 if (m_detectionMethod == detectionThreshold) {
4032 return meanDescriptorDistanceTmp < m_detectionThreshold;
4033 } else {
4034 return score > m_detectionScore;
4035 }
4036}
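// Decision rule used above, depending on m_detectionMethod:
//   detectionThreshold: detected if meanDescriptorDistance < m_detectionThreshold
//   detectionScore:     detected if nbMatches / meanDescriptorDistance > m_detectionScore
// so both a large number of matches and small descriptor distances push the
// score toward a positive detection.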
4037
4057 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam,
4058 vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, vpRect &boundingBox,
4059 vpImagePoint &centerOfGravity, bool (*func)(const vpHomogeneousMatrix &),
4060 const vpRect &rectangle)
4061{
4062 bool isMatchOk = matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
4063 if (isMatchOk) {
4064 // Use the pose estimated to project the model points in the image
4065 vpPoint pt;
4066 vpImagePoint imPt;
4067 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
4068 size_t cpt = 0;
4069 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
4070 pt = *it;
4071 pt.project(cMo);
4072 vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), imPt);
4073 modelImagePoints[cpt] = imPt;
4074 }
4075
4076 // Build a polygon with the list of model image points to get the bounding
4077 // box
4078 vpPolygon polygon(modelImagePoints);
4079 boundingBox = polygon.getBoundingBox();
4080
4081 // Compute the center of gravity of the current inlier keypoints
4082 double meanU = 0.0, meanV = 0.0;
4083 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
4084 meanU += it->get_u();
4085 meanV += it->get_v();
4086 }
4087
4088 meanU /= (double)m_ransacInliers.size();
4089 meanV /= (double)m_ransacInliers.size();
4090
4091 centerOfGravity.set_u(meanU);
4092 centerOfGravity.set_v(meanV);
4093 }
4094
4095 return isMatchOk;
4096}
4097
4112 void vpKeyPoint::detectExtractAffine(const vpImage<unsigned char> &I,
4113 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
4114 std::vector<cv::Mat> &listOfDescriptors,
4115 std::vector<vpImage<unsigned char> > *listOfAffineI)
4116{
4117#if 0
4118 cv::Mat img;
4119  vpImageConvert::convert(I, img);
4120 listOfKeypoints.clear();
4121 listOfDescriptors.clear();
4122
4123 for (int tl = 1; tl < 6; tl++) {
4124 double t = pow(2, 0.5 * tl);
4125 for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4126 std::vector<cv::KeyPoint> keypoints;
4127 cv::Mat descriptors;
4128
4129 cv::Mat timg, mask, Ai;
4130 img.copyTo(timg);
4131
4132 affineSkew(t, phi, timg, mask, Ai);
4133
4134
4135 if(listOfAffineI != NULL) {
4136 cv::Mat img_disp;
4137 bitwise_and(mask, timg, img_disp);
4139 vpImageConvert::convert(img_disp, tI);
4140 listOfAffineI->push_back(tI);
4141 }
4142#if 0
4143 cv::Mat img_disp;
4144 cv::bitwise_and(mask, timg, img_disp);
4145 cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
4146 cv::imshow( "Skew", img_disp );
4147 cv::waitKey(0);
4148#endif
4149
4150 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4151 it != m_detectors.end(); ++it) {
4152 std::vector<cv::KeyPoint> kp;
4153 it->second->detect(timg, kp, mask);
4154 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4155 }
4156
4157 double elapsedTime;
4158 extract(timg, keypoints, descriptors, elapsedTime);
4159
4160 for(unsigned int i = 0; i < keypoints.size(); i++) {
4161 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4162 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4163 keypoints[i].pt.x = kpt_t.at<float>(0, 0);
4164 keypoints[i].pt.y = kpt_t.at<float>(1, 0);
4165 }
4166
4167 listOfKeypoints.push_back(keypoints);
4168 listOfDescriptors.push_back(descriptors);
4169 }
4170 }
4171
4172#else
4173 cv::Mat img;
4174  vpImageConvert::convert(I, img);
4175
4176 // Create a vector for storing the affine skew parameters
4177 std::vector<std::pair<double, int> > listOfAffineParams;
4178 for (int tl = 1; tl < 6; tl++) {
4179 double t = pow(2, 0.5 * tl);
4180 for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4181 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
4182 }
4183 }
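// The (t, phi) grid follows the affine-simulation sampling of ASIFT
// (Morel & Yu): tilts t = 2^(tl/2) for tl = 1..5, with rotation steps of
// (int)(72 / t) degrees, so stronger tilts are sampled more densely in
// rotation. For example:
//   tl = 1 -> t ~ 1.41, phi in {0, 50, 100, 150}
//   tl = 5 -> t ~ 5.66, phi in {0, 12, 24, ..., 168}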
4184
4185 listOfKeypoints.resize(listOfAffineParams.size());
4186 listOfDescriptors.resize(listOfAffineParams.size());
4187
4188 if (listOfAffineI != NULL) {
4189 listOfAffineI->resize(listOfAffineParams.size());
4190 }
4191
4192#ifdef VISP_HAVE_OPENMP
4193#pragma omp parallel for
4194#endif
4195 for (int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
4196 std::vector<cv::KeyPoint> keypoints;
4197 cv::Mat descriptors;
4198
4199 cv::Mat timg, mask, Ai;
4200 img.copyTo(timg);
4201
4202 affineSkew(listOfAffineParams[(size_t)cpt].first, listOfAffineParams[(size_t)cpt].second, timg, mask, Ai);
4203
4204 if (listOfAffineI != NULL) {
4205 cv::Mat img_disp;
4206 bitwise_and(mask, timg, img_disp);
4207      vpImage<unsigned char> tI;
4208 vpImageConvert::convert(img_disp, tI);
4209 (*listOfAffineI)[(size_t)cpt] = tI;
4210 }
4211
4212#if 0
4213 cv::Mat img_disp;
4214 cv::bitwise_and(mask, timg, img_disp);
4215 cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
4216 cv::imshow( "Skew", img_disp );
4217 cv::waitKey(0);
4218#endif
4219
4220 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4221 it != m_detectors.end(); ++it) {
4222 std::vector<cv::KeyPoint> kp;
4223 it->second->detect(timg, kp, mask);
4224 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4225 }
4226
4227 double elapsedTime;
4228 extract(timg, keypoints, descriptors, elapsedTime);
4229
4230 for (size_t i = 0; i < keypoints.size(); i++) {
4231 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4232 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4233 keypoints[i].pt.x = kpt_t.at<float>(0, 0);
4234 keypoints[i].pt.y = kpt_t.at<float>(1, 0);
4235 }
4236
4237 listOfKeypoints[(size_t)cpt] = keypoints;
4238 listOfDescriptors[(size_t)cpt] = descriptors;
4239 }
4240#endif
4241}
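// Keypoints are detected in the skewed image, so each location is mapped
// back to the original image frame through the matrix Ai returned by
// affineSkew(): the homogeneous point (x, y, 1) of a simulated view becomes
// Ai * (x, y, 1)^T, which is what the kpt_t computation above performs.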
4242
4246 void vpKeyPoint::reset()
4247{
4248 // vpBasicKeyPoint class
4249  referenceImagePointsList.clear();
4250 currentImagePointsList.clear();
4251 matchedReferencePoints.clear();
4252 _reference_computed = false;
4253
4254 m_computeCovariance = false;
4255 m_covarianceMatrix = vpMatrix();
4256 m_currentImageId = 0;
4257 m_detectionMethod = detectionScore;
4258 m_detectionScore = 0.15;
4259 m_detectionThreshold = 100.0;
4260 m_detectionTime = 0.0;
4261 m_detectorNames.clear();
4262 m_detectors.clear();
4263 m_extractionTime = 0.0;
4264 m_extractorNames.clear();
4265 m_extractors.clear();
4266 m_filteredMatches.clear();
4267 m_filterType = ratioDistanceThreshold;
4268 m_imageFormat = jpgImageFormat;
4269 m_knnMatches.clear();
4270 m_mapOfImageId.clear();
4271 m_mapOfImages.clear();
4272 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
4273 m_matcherName = "BruteForce-Hamming";
4274 m_matches.clear();
4275 m_matchingFactorThreshold = 2.0;
4276 m_matchingRatioThreshold = 0.85;
4277 m_matchingTime = 0.0;
4278 m_matchRansacKeyPointsToPoints.clear();
4279 m_nbRansacIterations = 200;
4280 m_nbRansacMinInlierCount = 100;
4281 m_objectFilteredPoints.clear();
4282 m_poseTime = 0.0;
4283 m_queryDescriptors = cv::Mat();
4284 m_queryFilteredKeyPoints.clear();
4285 m_queryKeyPoints.clear();
4286 m_ransacConsensusPercentage = 20.0;
4287 m_ransacFilterFlag = vpPose::NO_FILTER;
4288 m_ransacInliers.clear();
4289 m_ransacOutliers.clear();
4290 m_ransacParallel = true;
4291 m_ransacParallelNbThreads = 0;
4292 m_ransacReprojectionError = 6.0;
4293 m_ransacThreshold = 0.01;
4294 m_trainDescriptors = cv::Mat();
4295 m_trainKeyPoints.clear();
4296 m_trainPoints.clear();
4297 m_trainVpPoints.clear();
4298 m_useAffineDetection = false;
4299#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
4300 m_useBruteForceCrossCheck = true;
4301#endif
4302 m_useConsensusPercentage = false;
4303 m_useKnn = true; // as m_filterType == ratioDistanceThreshold
4304 m_useMatchTrainToQuery = false;
4305 m_useRansacVVS = true;
4306 m_useSingleMatchFilter = true;
4307
4308 m_detectorNames.push_back("ORB");
4309 m_extractorNames.push_back("ORB");
4310
4311 init();
4312}
4313
4322void vpKeyPoint::saveLearningData(const std::string &filename, bool binaryMode, bool saveTrainingImages)
4323{
4324 std::string parent = vpIoTools::getParent(filename);
4325 if (!parent.empty()) {
4326    vpIoTools::makeDirectory(parent);
4327 }
4328
4329 std::map<int, std::string> mapOfImgPath;
4330 if (saveTrainingImages) {
4331#ifdef VISP_HAVE_MODULE_IO
4332 // Save the training image files in the same directory
4333 unsigned int cpt = 0;
4334
4335 for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
4336 ++it, cpt++) {
4337 if (cpt > 999) {
4338 throw vpException(vpException::fatalError, "The number of training images to save is too big !");
4339 }
4340
4341 std::stringstream ss;
4342 ss << "train_image_" << std::setfill('0') << std::setw(3) << cpt;
4343
4344 switch (m_imageFormat) {
4345 case jpgImageFormat:
4346 ss << ".jpg";
4347 break;
4348
4349 case pngImageFormat:
4350 ss << ".png";
4351 break;
4352
4353 case ppmImageFormat:
4354 ss << ".ppm";
4355 break;
4356
4357 case pgmImageFormat:
4358 ss << ".pgm";
4359 break;
4360
4361 default:
4362 ss << ".png";
4363 break;
4364 }
4365
4366 std::string imgFilename = ss.str();
4367 mapOfImgPath[it->first] = imgFilename;
4368 vpImageIo::write(it->second, parent + (!parent.empty() ? "/" : "") + imgFilename);
4369 }
4370#else
4371 std::cout << "Warning: in vpKeyPoint::saveLearningData() training images "
4372 "are not saved because "
4373 "visp_io module is not available !"
4374 << std::endl;
4375#endif
4376 }
4377
4378 bool have3DInfo = m_trainPoints.size() > 0;
4379 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
4380 throw vpException(vpException::fatalError, "List of keypoints and list of 3D points have different size !");
4381 }
4382
4383 if (binaryMode) {
4384 // Save the learning data into little endian binary file.
4385 std::ofstream file(filename.c_str(), std::ofstream::binary);
4386 if (!file.is_open()) {
4387 throw vpException(vpException::ioError, "Cannot create the file.");
4388 }
4389
4390 // Write info about training images
4391 int nbImgs = (int)mapOfImgPath.size();
4392 vpIoTools::writeBinaryValueLE(file, nbImgs);
4393
4394#ifdef VISP_HAVE_MODULE_IO
4395 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4396 // Write image_id
4397 int id = it->first;
4398      vpIoTools::writeBinaryValueLE(file, id);
4399
4400 // Write image path
4401 std::string path = it->second;
4402 int length = (int)path.length();
4403 vpIoTools::writeBinaryValueLE(file, length);
4404
4405 for (int cpt = 0; cpt < length; cpt++) {
4406 file.write((char *)(&path[(size_t)cpt]), sizeof(path[(size_t)cpt]));
4407 }
4408 }
4409#endif
4410
4411 // Write if we have 3D point information
4412 int have3DInfoInt = have3DInfo ? 1 : 0;
4413 vpIoTools::writeBinaryValueLE(file, have3DInfoInt);
4414
4415 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4416 int descriptorType = m_trainDescriptors.type();
4417
4418 // Write the number of descriptors
4419 vpIoTools::writeBinaryValueLE(file, nRows);
4420
4421 // Write the size of the descriptor
4422 vpIoTools::writeBinaryValueLE(file, nCols);
4423
4424 // Write the type of the descriptor
4425 vpIoTools::writeBinaryValueLE(file, descriptorType);
4426
4427 for (int i = 0; i < nRows; i++) {
4428 unsigned int i_ = (unsigned int)i;
4429 // Write u
4430 float u = m_trainKeyPoints[i_].pt.x;
4431      vpIoTools::writeBinaryValueLE(file, u);
4432
4433 // Write v
4434 float v = m_trainKeyPoints[i_].pt.y;
4435      vpIoTools::writeBinaryValueLE(file, v);
4436
4437 // Write size
4438 float size = m_trainKeyPoints[i_].size;
4439      vpIoTools::writeBinaryValueLE(file, size);
4440
4441 // Write angle
4442 float angle = m_trainKeyPoints[i_].angle;
4443 vpIoTools::writeBinaryValueLE(file, angle);
4444
4445 // Write response
4446 float response = m_trainKeyPoints[i_].response;
4447 vpIoTools::writeBinaryValueLE(file, response);
4448
4449 // Write octave
4450 int octave = m_trainKeyPoints[i_].octave;
4451 vpIoTools::writeBinaryValueLE(file, octave);
4452
4453 // Write class_id
4454 int class_id = m_trainKeyPoints[i_].class_id;
4455 vpIoTools::writeBinaryValueLE(file, class_id);
4456
4457// Write image_id
4458#ifdef VISP_HAVE_MODULE_IO
4459 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4460 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
4461 vpIoTools::writeBinaryValueLE(file, image_id);
4462#else
4463 int image_id = -1;
4464 // file.write((char *)(&image_id), sizeof(image_id));
4465 vpIoTools::writeBinaryValueLE(file, image_id);
4466#endif
4467
4468 if (have3DInfo) {
4469 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
4470        // Write oX
4471        vpIoTools::writeBinaryValueLE(file, oX);
4472
4473        // Write oY
4474        vpIoTools::writeBinaryValueLE(file, oY);
4475
4476        // Write oZ
4477        vpIoTools::writeBinaryValueLE(file, oZ);
4478
4479
4480 for (int j = 0; j < nCols; j++) {
4481 // Write the descriptor value
4482 switch (descriptorType) {
4483 case CV_8U:
4484 file.write((char *)(&m_trainDescriptors.at<unsigned char>(i, j)),
4485 sizeof(m_trainDescriptors.at<unsigned char>(i, j)));
4486 break;
4487
4488 case CV_8S:
4489 file.write((char *)(&m_trainDescriptors.at<char>(i, j)), sizeof(m_trainDescriptors.at<char>(i, j)));
4490 break;
4491
4492 case CV_16U:
4493 vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<unsigned short int>(i, j));
4494 break;
4495
4496 case CV_16S:
4497 vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<short int>(i, j));
4498 break;
4499
4500 case CV_32S:
4501 vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<int>(i, j));
4502 break;
4503
4504 case CV_32F:
4505 vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<float>(i, j));
4506 break;
4507
4508 case CV_64F:
4509 vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<double>(i, j));
4510 break;
4511
4512 default:
4513 throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
4514 break;
4515 }
4516 }
4517 }
4518
4519 file.close();
4520 } else {
4521 pugi::xml_document doc;
4522 pugi::xml_node node = doc.append_child(pugi::node_declaration);
4523 node.append_attribute("version") = "1.0";
4524 node.append_attribute("encoding") = "UTF-8";
4525
4526 if (!doc) {
4527 throw vpException(vpException::ioError, "Error with file: ", filename.c_str());
4528 }
4529
4530 pugi::xml_node root_node = doc.append_child("LearningData");
4531
4532 // Write the training images info
4533 pugi::xml_node image_node = root_node.append_child("TrainingImageInfo");
4534
4535#ifdef VISP_HAVE_MODULE_IO
4536 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4537 pugi::xml_node image_info_node = image_node.append_child("trainImg");
4538 image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
4539 std::stringstream ss;
4540 ss << it->first;
4541 image_info_node.append_attribute("image_id") = ss.str().c_str();
4542 }
4543#endif
4544
4545 // Write information about descriptors
4546 pugi::xml_node descriptors_info_node = root_node.append_child("DescriptorsInfo");
4547
4548 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4549 int descriptorType = m_trainDescriptors.type();
4550
4551 // Write the number of rows
4552 descriptors_info_node.append_child("nrows").append_child(pugi::node_pcdata).text() = nRows;
4553
4554 // Write the number of cols
4555 descriptors_info_node.append_child("ncols").append_child(pugi::node_pcdata).text() = nCols;
4556
4557 // Write the descriptors type
4558 descriptors_info_node.append_child("type").append_child(pugi::node_pcdata).text() = descriptorType;
4559
4560 for (int i = 0; i < nRows; i++) {
4561 unsigned int i_ = (unsigned int)i;
4562 pugi::xml_node descriptor_node = root_node.append_child("DescriptorInfo");
4563
4564 descriptor_node.append_child("u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
4565 descriptor_node.append_child("v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
4566 descriptor_node.append_child("size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
4567 descriptor_node.append_child("angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
4568 descriptor_node.append_child("response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
4569 descriptor_node.append_child("octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
4570 descriptor_node.append_child("class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
4571
4572#ifdef VISP_HAVE_MODULE_IO
4573 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4574 descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() =
4575 ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4576#else
4577 descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() = -1;
4578#endif
4579
4580 if (have3DInfo) {
4581 descriptor_node.append_child("oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
4582 descriptor_node.append_child("oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
4583 descriptor_node.append_child("oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
4584 }
4585
4586 pugi::xml_node desc_node = descriptor_node.append_child("desc");
4587
4588 for (int j = 0; j < nCols; j++) {
4589 switch (descriptorType) {
4590 case CV_8U: {
4591        // Promote an unsigned char to an int
4592        // val_tmp holds the numeric value that will be written
4593        // We save the value in numeric form, otherwise the xml library will
4594        // not be able to parse it. A better solution may be possible
4595 int val_tmp = m_trainDescriptors.at<unsigned char>(i, j);
4596 desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
4597 } break;
4598
4599 case CV_8S: {
4600        // Promote a char to an int
4601        // val_tmp holds the numeric value that will be written
4602        // We save the value in numeric form, otherwise the xml library will
4603        // not be able to parse it. A better solution may be possible
4604 int val_tmp = m_trainDescriptors.at<char>(i, j);
4605 desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
4606 } break;
4607
4608 case CV_16U:
4609 desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4610 m_trainDescriptors.at<unsigned short int>(i, j);
4611 break;
4612
4613 case CV_16S:
4614 desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<short int>(i, j);
4615 break;
4616
4617 case CV_32S:
4618 desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<int>(i, j);
4619 break;
4620
4621 case CV_32F:
4622 desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<float>(i, j);
4623 break;
4624
4625 case CV_64F:
4626 desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<double>(i, j);
4627 break;
4628
4629 default:
4630 throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
4631 break;
4632 }
4633 }
4634 }
4635
4636 doc.save_file(filename.c_str(), PUGIXML_TEXT(" "), pugi::format_default, pugi::encoding_utf8);
4637 }
4638}
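// Minimal save/load round-trip sketch (the file name is arbitrary; the
// second argument selects binary mode):
//
//   keypoint.saveLearningData("learning_data.bin", true);
//   vpKeyPoint keypoint2;
//   keypoint2.loadLearningData("learning_data.bin", true);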
4639
4640#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4641#ifndef DOXYGEN_SHOULD_SKIP_THIS
4642// From OpenCV 2.4.11 source code.
4643struct KeypointResponseGreaterThanThreshold {
4644 KeypointResponseGreaterThanThreshold(float _value) : value(_value) {}
4645 inline bool operator()(const cv::KeyPoint &kpt) const { return kpt.response >= value; }
4646 float value;
4647};
4648
4649struct KeypointResponseGreater {
4650 inline bool operator()(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2) const { return kp1.response > kp2.response; }
4651};
4652
4653// takes keypoints and culls them by the response
4654void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints, int n_points)
4655{
4656 // this is only necessary if the keypoints size is greater than the number
4657 // of desired points.
4658 if (n_points >= 0 && keypoints.size() > (size_t)n_points) {
4659 if (n_points == 0) {
4660 keypoints.clear();
4661 return;
4662 }
4663 // first use nth element to partition the keypoints into the best and
4664 // worst.
4665 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
4666 // this is the boundary response, and in the case of FAST may be ambiguous
4667 float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
4668 // use std::partition to grab all of the keypoints with the boundary
4669 // response.
4670 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4671 keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4672    // resize the keypoints, given this new end point. nth_element and
4673    // partition reordered the points in place
4674 keypoints.resize((size_t)(new_end - keypoints.begin()));
4675 }
4676}
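// retainBest() keeps at least n_points keypoints: std::nth_element places
// the n_points strongest responses first, then std::partition also keeps any
// keypoint tying with the weakest retained response, which matters for
// detectors such as FAST that produce many identical response values.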
4677
4678struct RoiPredicate {
4679 RoiPredicate(const cv::Rect &_r) : r(_r) {}
4680
4681 bool operator()(const cv::KeyPoint &keyPt) const { return !r.contains(keyPt.pt); }
4682
4683 cv::Rect r;
4684};
4685
4686void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4687 int borderSize)
4688{
4689 if (borderSize > 0) {
4690 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4691 keypoints.clear();
4692 else
4693 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4694 RoiPredicate(cv::Rect(
4695 cv::Point(borderSize, borderSize),
4696 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4697 keypoints.end());
4698 }
4699}
4700
4701struct SizePredicate {
4702 SizePredicate(float _minSize, float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
4703
4704 bool operator()(const cv::KeyPoint &keyPt) const
4705 {
4706 float size = keyPt.size;
4707 return (size < minSize) || (size > maxSize);
4708 }
4709
4710 float minSize, maxSize;
4711};
4712
4713void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize)
4714{
4715 CV_Assert(minSize >= 0);
4716 CV_Assert(maxSize >= 0);
4717 CV_Assert(minSize <= maxSize);
4718
4719 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4720}
4721
4722class MaskPredicate
4723{
4724public:
4725 MaskPredicate(const cv::Mat &_mask) : mask(_mask) {}
4726 bool operator()(const cv::KeyPoint &key_pt) const
4727 {
4728 return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4729 }
4730
4731private:
4732 const cv::Mat mask;
4733};
4734
4735void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask)
4736{
4737 if (mask.empty())
4738 return;
4739
4740 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4741}
4742
4743struct KeyPoint_LessThan {
4744 KeyPoint_LessThan(const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) {}
4745 bool operator()(/*int i, int j*/ size_t i, size_t j) const
4746 {
4747 const cv::KeyPoint &kp1 = (*kp)[/*(size_t)*/ i];
4748 const cv::KeyPoint &kp2 = (*kp)[/*(size_t)*/ j];
4749 if (!vpMath::equal(kp1.pt.x, kp2.pt.x,
4750 std::numeric_limits<float>::epsilon())) { // if (kp1.pt.x !=
4751 // kp2.pt.x) {
4752 return kp1.pt.x < kp2.pt.x;
4753 }
4754
4755 if (!vpMath::equal(kp1.pt.y, kp2.pt.y,
4756 std::numeric_limits<float>::epsilon())) { // if (kp1.pt.y !=
4757 // kp2.pt.y) {
4758 return kp1.pt.y < kp2.pt.y;
4759 }
4760
4761 if (!vpMath::equal(kp1.size, kp2.size,
4762 std::numeric_limits<float>::epsilon())) { // if (kp1.size !=
4763 // kp2.size) {
4764 return kp1.size > kp2.size;
4765 }
4766
4767 if (!vpMath::equal(kp1.angle, kp2.angle,
4768 std::numeric_limits<float>::epsilon())) { // if (kp1.angle !=
4769 // kp2.angle) {
4770 return kp1.angle < kp2.angle;
4771 }
4772
4773 if (!vpMath::equal(kp1.response, kp2.response,
4774 std::numeric_limits<float>::epsilon())) { // if (kp1.response !=
4775 // kp2.response) {
4776 return kp1.response > kp2.response;
4777 }
4778
4779 if (kp1.octave != kp2.octave) {
4780 return kp1.octave > kp2.octave;
4781 }
4782
4783 if (kp1.class_id != kp2.class_id) {
4784 return kp1.class_id > kp2.class_id;
4785 }
4786
4787 return i < j;
4788 }
4789 const std::vector<cv::KeyPoint> *kp;
4790};
4791
4792void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4793{
4794 size_t i, j, n = keypoints.size();
4795 std::vector<size_t> kpidx(n);
4796 std::vector<uchar> mask(n, (uchar)1);
4797
4798 for (i = 0; i < n; i++) {
4799 kpidx[i] = i;
4800 }
4801 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4802 for (i = 1, j = 0; i < n; i++) {
4803 cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4804 cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4805 // if (kp1.pt.x != kp2.pt.x || kp1.pt.y != kp2.pt.y || kp1.size !=
4806 // kp2.size || kp1.angle != kp2.angle) {
4807 if (!vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4808 !vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4809 !vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4810 !vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4811 j = i;
4812 } else {
4813 mask[kpidx[i]] = 0;
4814 }
4815 }
4816
4817 for (i = j = 0; i < n; i++) {
4818 if (mask[i]) {
4819 if (i != j) {
4820 keypoints[j] = keypoints[i];
4821 }
4822 j++;
4823 }
4824 }
4825 keypoints.resize(j);
4826}
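// removeDuplicated() sorts an index array with KeyPoint_LessThan so that
// identical keypoints become neighbors, marks every duplicate after the
// first of each run in `mask`, then compacts the vector in place; applying
// the mask in index order preserves the original order of the survivors.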
4827
4828/*
4829 * PyramidAdaptedFeatureDetector
4830 */
4831vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &_detector,
4832 int _maxLevel)
4833 : detector(_detector), maxLevel(_maxLevel)
4834{
4835}
4836
4837bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty() const
4838{
4839  return detector.empty() || detector->empty();
4840}
4841
4842void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4843 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4844{
4845 detectImpl(image.getMat(), keypoints, mask.getMat());
4846}
4847
4848void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4849 const cv::Mat &mask) const
4850{
4851 cv::Mat src = image;
4852 cv::Mat src_mask = mask;
4853
4854 cv::Mat dilated_mask;
4855 if (!mask.empty()) {
4856 cv::dilate(mask, dilated_mask, cv::Mat());
4857 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4858 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4859 dilated_mask = mask255;
4860 }
4861
4862 for (int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4863 // Detect on current level of the pyramid
4864 std::vector<cv::KeyPoint> new_pts;
4865 detector->detect(src, new_pts, src_mask);
4866 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4867 for (; it != end; ++it) {
4868 it->pt.x *= multiplier;
4869 it->pt.y *= multiplier;
4870 it->size *= multiplier;
4871 it->octave = l;
4872 }
4873 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4874
4875 // Downsample
4876 if (l < maxLevel) {
4877 cv::Mat dst;
4878 pyrDown(src, dst);
4879 src = dst;
4880
4881 if (!mask.empty())
4882        cv::resize(dilated_mask, src_mask, src.size(), 0, 0, cv::INTER_AREA);
4883 }
4884 }
4885
4886 if (!mask.empty())
4887 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4888}
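// Each pyramid level is detected with a dilated (more permissive) mask;
// keypoint coordinates and size are scaled back by 2^l and the octave is
// set to the level index. The exact user mask is enforced only once at the
// end by runByPixelsMask(), so keypoints near the mask border are not lost
// at coarse levels.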
4889#endif
4890#endif
4891
4892#elif !defined(VISP_BUILD_SHARED_LIBS)
4893// Work around to avoid warning: libvisp_vision.a(vpKeyPoint.cpp.o) has no symbols
4894void dummy_vpKeyPoint(){};
4895#endif