Visual Servoing Platform version 3.6.0
tutorial-mb-generic-tracker-rgbd-structure-core.cpp
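// Tutorial: markerless model-based tracking with an Occipital Structure Core
// RGB-D sensor. The generic tracker combines moving-edge, KLT keypoint and
// dense depth features, and can learn a keypoint database to re-localize the
// object automatically.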
#include <iostream>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) && defined(VISP_HAVE_OPENCV)
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpOccipitalStructure.h>
#include <visp3/vision/vpKeyPoint.h>
int main(int argc, char *argv[])
{
  std::string config_color = "", config_depth = "";
  std::string model_color = "", model_depth = "";
  std::string init_file = "";
  bool use_ogre = false;
  bool use_scanline = false;
  bool use_edges = true;
  bool use_klt = true;
  bool use_depth = true;
  bool learn = false;
  bool auto_init = false;
  double proj_error_threshold = 25;
  std::string learning_data = "learning/data-learned.bin";
  bool display_projection_error = false;

  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--config_color" && i + 1 < argc) {
      config_color = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--config_depth" && i + 1 < argc) {
      config_depth = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--model_color" && i + 1 < argc) {
      model_color = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--model_depth" && i + 1 < argc) {
      model_depth = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--init_file" && i + 1 < argc) {
      init_file = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--proj_error_threshold" && i + 1 < argc) {
      proj_error_threshold = std::atof(argv[i + 1]);
    } else if (std::string(argv[i]) == "--use_ogre") {
      use_ogre = true;
    } else if (std::string(argv[i]) == "--use_scanline") {
      use_scanline = true;
    } else if (std::string(argv[i]) == "--use_edges" && i + 1 < argc) {
      use_edges = (std::atoi(argv[i + 1]) == 0 ? false : true);
    } else if (std::string(argv[i]) == "--use_klt" && i + 1 < argc) {
      use_klt = (std::atoi(argv[i + 1]) == 0 ? false : true);
    } else if (std::string(argv[i]) == "--use_depth" && i + 1 < argc) {
      use_depth = (std::atoi(argv[i + 1]) == 0 ? false : true);
    } else if (std::string(argv[i]) == "--learn") {
      learn = true;
    } else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
      learning_data = argv[i + 1];
    } else if (std::string(argv[i]) == "--auto_init") {
      auto_init = true;
    } else if (std::string(argv[i]) == "--display_proj_error") {
      display_projection_error = true;
    } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: \n"
                << argv[0]
                << " [--model_color <object.cao>] [--model_depth <object.cao>]"
                   " [--config_color <object.xml>] [--config_depth <object.xml>]"
                   " [--init_file <object.init>] [--use_ogre] [--use_scanline]"
                   " [--proj_error_threshold <threshold between 0 and 90> (default: "
                << proj_error_threshold
                << ")]"
                   " [--use_edges <0|1> (default: 1)] [--use_klt <0|1> (default: 1)] [--use_depth <0|1> (default: 1)]"
                   " [--learn] [--auto_init] [--learning_data <path to .bin> (default: learning/data-learned.bin)]"
                   " [--display_proj_error]"
                << std::endl;

      std::cout << "\n** How to track a 4.2 cm wide cube with manual initialization:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1" << std::endl;
      std::cout << "\n** How to learn the cube and create a learning database:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --learn"
                << std::endl;
      std::cout << "\n** How to track the cube with initialization from learning database:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --auto_init"
                << std::endl;

      return EXIT_SUCCESS;
    }
  }
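  // When only --model_color is given, derive the remaining file names from it:
  // <model>.xml, <model>_depth.xml and <model>.init located next to the .cao file.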
  if (model_depth.empty()) {
    model_depth = model_color;
  }
  std::string parentname = vpIoTools::getParent(model_color);
  if (config_color.empty()) {
    config_color = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".xml";
  }
  if (config_depth.empty()) {
    config_depth = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + "_depth.xml";
  }
  if (init_file.empty()) {
    init_file = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".init";
  }
  std::cout << "Tracked features: " << std::endl;
  std::cout << "  Use edges   : " << use_edges << std::endl;
  std::cout << "  Use klt     : " << use_klt << std::endl;
  std::cout << "  Use depth   : " << use_depth << std::endl;
  std::cout << "Tracker options: " << std::endl;
  std::cout << "  Use ogre    : " << use_ogre << std::endl;
  std::cout << "  Use scanline: " << use_scanline << std::endl;
  std::cout << "  Proj. error : " << proj_error_threshold << std::endl;
  std::cout << "  Display proj. error: " << display_projection_error << std::endl;
  std::cout << "Config files: " << std::endl;
  std::cout << "  Config color: \"" << config_color << "\"" << std::endl;
  std::cout << "  Config depth: \"" << config_depth << "\"" << std::endl;
  std::cout << "  Model color : \"" << model_color << "\"" << std::endl;
  std::cout << "  Model depth : \"" << model_depth << "\"" << std::endl;
  std::cout << "  Init file   : \"" << init_file << "\"" << std::endl;
  std::cout << "Learning options: " << std::endl;
  std::cout << "  Learn       : " << learn << std::endl;
  std::cout << "  Auto init   : " << auto_init << std::endl;
  std::cout << "  Learning data: " << learning_data << std::endl;

  if (!use_edges && !use_klt && !use_depth) {
    std::cout << "You must enable at least one feature type among edges, KLT and depth." << std::endl;
    return EXIT_FAILURE;
  }

  if (config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() ||
      init_file.empty()) {
    std::cout << "One of the config, model or init file names is empty. Use --help to see the options." << std::endl;
    return EXIT_FAILURE;
  }
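  // Configure the Structure Core capture session; the visible (color) stream
  // is enabled explicitly in addition to the depth stream.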
  vpOccipitalStructure sc;

  ST::CaptureSessionSettings settings;
  settings.source = ST::CaptureSessionSourceId::StructureCore;
  settings.structureCore.visibleEnabled = true;
  settings.applyExpensiveCorrection = true; // Apply a correction and clean filter to the depth before streaming.

  try {
    sc.open(settings);
  } catch (const vpException &e) {
    std::cout << "Catch an exception: " << e.what() << std::endl;
    std::cout << "Check if the Structure Core camera is connected..." << std::endl;
    return EXIT_SUCCESS;
  }
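  // Query the visible-stream resolution and the intrinsics of the color and
  // depth cameras (default perspective projection model without distortion).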
  unsigned int width = sc.getWidth(vpOccipitalStructure::visible);
  unsigned int height = sc.getHeight(vpOccipitalStructure::visible);
  vpCameraParameters cam_color = sc.getCameraParameters(vpOccipitalStructure::visible);
  vpCameraParameters cam_depth = sc.getCameraParameters(vpOccipitalStructure::depth);

  std::cout << "Sensor internal camera parameters for color camera: " << cam_color << std::endl;
  std::cout << "Sensor internal camera parameters for depth camera: " << cam_depth << std::endl;
161
162 vpImage<vpRGBa> I_color(height, width);
163 vpImage<unsigned char> I_gray(height, width), I_depth(height, width);
164 vpImage<float> I_depth_raw(height, width);
165
166 unsigned int _posx = 100, _posy = 50;
167
168#ifdef VISP_HAVE_X11
169 vpDisplayX d1, d2;
170#elif defined(VISP_HAVE_GDI)
171 vpDisplayGDI d1, d2;
172#elif defined(HAVE_OPENCV_HIGHGUI)
173 vpDisplayOpenCV d1, d2;
174#endif
175 if (use_edges || use_klt)
176 d1.init(I_gray, _posx, _posy, "Color stream");
177 if (use_depth)
178 d2.init(I_depth, _posx + I_gray.getWidth() + 10, _posy, "Depth stream");
179
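  // Warm-up loop: stream and display images until the user clicks in one of
  // the windows, leaving time to place the object in the field of view.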
  while (true) {
    sc.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, NULL, NULL);

    if (use_edges || use_klt) {
      vpImageConvert::convert(I_color, I_gray);
      vpDisplay::display(I_gray);
      vpDisplay::displayText(I_gray, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_gray);

      if (vpDisplay::getClick(I_gray, false)) {
        break;
      }
    }
    if (use_depth) {
      vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
      vpDisplay::display(I_depth);
      vpDisplay::displayText(I_depth, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_depth);

      if (vpDisplay::getClick(I_depth, false)) {
        break;
      }
    }
  }

  std::vector<int> trackerTypes;
  if (use_edges && use_klt)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
  else if (use_edges)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);
  else if (use_klt)
    trackerTypes.push_back(vpMbGenericTracker::KLT_TRACKER);

  if (use_depth)
    trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);
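  // Extrinsics between the color and depth streams are read from the sensor.
  // Assumption: getTransform(from, to) maps points from the first stream frame
  // to the second, so the call below yields color_M_depth.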
  vpHomogeneousMatrix color_M_depth = sc.getTransform(vpOccipitalStructure::depth, vpOccipitalStructure::visible);
  vpHomogeneousMatrix depth_M_color = color_M_depth.inverse();
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
  std::map<std::string, std::string> mapOfInitFiles;
  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;

  std::vector<vpColVector> pointcloud;

  vpMbGenericTracker tracker(trackerTypes);

  if ((use_edges || use_klt) && use_depth) {
    tracker.loadConfigFile(config_color, config_depth);
    tracker.loadModel(model_color, model_depth);
    std::cout << "Sensor internal depth_M_color: \n" << depth_M_color << std::endl;
    mapOfCameraTransformations["Camera2"] = depth_M_color;
    tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
    mapOfImages["Camera1"] = &I_gray;
    mapOfImages["Camera2"] = &I_depth;
    mapOfInitFiles["Camera1"] = init_file;
    tracker.setCameraParameters(cam_color, cam_depth);
  } else if (use_edges || use_klt) {
    tracker.loadConfigFile(config_color);
    tracker.loadModel(model_color);
    tracker.setCameraParameters(cam_color);
  } else if (use_depth) {
    tracker.loadConfigFile(config_depth);
    tracker.loadModel(model_depth);
    tracker.setCameraParameters(cam_depth);
  }
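  // Visibility tests: the Ogre-based test needs ViSP built with Ogre3D support,
  // while the scanline test is a rendering-free alternative.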
  tracker.setDisplayFeatures(true);
  tracker.setOgreVisibilityTest(use_ogre);
  tracker.setScanLineVisibilityTest(use_scanline);
  tracker.setProjectionErrorComputation(true);
  tracker.setProjectionErrorDisplay(display_projection_error);

#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
    (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
  std::string detectorName = "SIFT";
  std::string extractorName = "SIFT";
  std::string matcherName = "BruteForce";
#else
  std::string detectorName = "FAST";
  std::string extractorName = "ORB";
  std::string matcherName = "BruteForce-Hamming";
#endif
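  // Keypoint pipeline used for learning and automatic re-initialization:
  // SIFT when available (non-free/xfeatures2d module or a recent OpenCV),
  // otherwise FAST detection with ORB descriptors and Hamming matching.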
  vpKeyPoint keypoint;
  if (learn || auto_init) {
    keypoint.setDetector(detectorName);
    keypoint.setExtractor(extractorName);
    keypoint.setMatcher(matcherName);
#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
    keypoint.setDetectorParameter("ORB", "nLevels", 1);
#else
    cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
    if (orb_detector) {
      orb_detector->setNLevels(1);
    }
#endif
#endif
  }

  if (auto_init) {
    if (!vpIoTools::checkFilename(learning_data)) {
      std::cout << "Cannot enable auto detection. Learning file \"" << learning_data << "\" doesn't exist" << std::endl;
      return EXIT_FAILURE;
    }
    keypoint.loadLearningData(learning_data, true);
  } else {
    if ((use_edges || use_klt) && use_depth)
      tracker.initClick(mapOfImages, mapOfInitFiles, true);
    else if (use_edges || use_klt)
      tracker.initClick(I_gray, init_file, true);
    else if (use_depth)
      tracker.initClick(I_depth, init_file, true);

    if (learn)
      vpIoTools::makeDirectory(vpIoTools::getParent(learning_data));
  }

  bool run_auto_init = false;
  if (auto_init) {
    run_auto_init = true;
  }
  std::vector<double> times_vec;
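  // Main loop: acquire an RGB-D frame, optionally re-localize from the learned
  // keypoint database, track, validate the pose with the projection error,
  // then display the results and process user clicks.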
  try {
    // To be able to display keypoints matching with test-detection-rs2
    int learn_id = 1;
    bool quit = false;
    bool learn_position = false;
    double loop_t = 0;
    vpHomogeneousMatrix cMo;

    while (!quit) {
      double t = vpTime::measureTimeMs();
      bool tracking_failed = false;

      // Acquire images and update tracker input data
      sc.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud);

      if (use_edges || use_klt || run_auto_init) {
        vpImageConvert::convert(I_color, I_gray);
        vpDisplay::display(I_gray);
      }
      if (use_depth) {
        vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
        vpDisplay::display(I_depth);
      }

      if ((use_edges || use_klt) && use_depth) {
        mapOfImages["Camera1"] = &I_gray;
        mapOfPointclouds["Camera2"] = &pointcloud;
        mapOfWidths["Camera2"] = width;
        mapOfHeights["Camera2"] = height;
      } else if (use_edges || use_klt) {
        mapOfImages["Camera"] = &I_gray;
      } else if (use_depth) {
        mapOfPointclouds["Camera"] = &pointcloud;
        mapOfWidths["Camera"] = width;
        mapOfHeights["Camera"] = height;
      }

      // Run auto initialization from learned data
      if (run_auto_init) {
        if (keypoint.matchPoint(I_gray, cam_color, cMo)) {
          std::cout << "Auto init succeeded" << std::endl;
          if ((use_edges || use_klt) && use_depth) {
            mapOfCameraPoses["Camera1"] = cMo;
            mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
            tracker.initFromPose(mapOfImages, mapOfCameraPoses);
          } else if (use_edges || use_klt) {
            tracker.initFromPose(I_gray, cMo);
          } else if (use_depth) {
            tracker.initFromPose(I_depth, depth_M_color * cMo);
          }
        } else {
          if (use_edges || use_klt) {
            vpDisplay::flush(I_gray);
          }
          if (use_depth) {
            vpDisplay::flush(I_depth);
          }
          continue;
        }
      }

      // Run the tracker
      try {
        if (run_auto_init) {
          // Turn display features off just after auto init to not display wrong moving-edges if the tracker fails
          tracker.setDisplayFeatures(false);

          run_auto_init = false;
        }
        if ((use_edges || use_klt) && use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        } else if (use_edges || use_klt) {
          tracker.track(I_gray);
        } else if (use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        }
      } catch (const vpException &e) {
        std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
        tracking_failed = true;
        if (auto_init) {
          std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
          run_auto_init = true;
        }
      }

      // Get object pose
      cMo = tracker.getPose();

      // Check tracking errors
      double proj_error = 0;
      if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
        proj_error = tracker.getProjectionError();
      } else {
        proj_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
      }

      if (auto_init && proj_error > proj_error_threshold) {
        std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
        run_auto_init = true;
        tracking_failed = true;
      }

      // Display tracking results
      if (!tracking_failed) {
        // Turn display features on
        tracker.setDisplayFeatures(true);

        if ((use_edges || use_klt) && use_depth) {
          tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
          vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);
        } else if (use_edges || use_klt) {
          tracker.display(I_gray, cMo, cam_color, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
        } else if (use_depth) {
          tracker.display(I_depth, cMo, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_depth, cMo, cam_depth, 0.05, vpColor::none, 3);
        }
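        // Overlay the number of tracked features of each type on the color image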
        {
          std::stringstream ss;
          ss << "Nb features: " << tracker.getError().size();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 50, 20, ss.str(), vpColor::red);
        }
        {
          std::stringstream ss;
          ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt()
             << ", depth " << tracker.getNbFeaturesDepthDense();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 30, 20, ss.str(), vpColor::red);
        }
      }

      std::stringstream ss;
      ss << "Loop time: " << loop_t << " ms";

      vpMouseButton::vpMouseButtonType button;
      if (use_edges || use_klt) {
        vpDisplay::displayText(I_gray, 20, 20, ss.str(), vpColor::red);
        if (learn)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: learn  Right click: quit", vpColor::red);
        else if (auto_init)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: auto_init  Right click: quit", vpColor::red);
        else
          vpDisplay::displayText(I_gray, 35, 20, "Right click: quit", vpColor::red);

        vpDisplay::flush(I_gray);

        if (vpDisplay::getClick(I_gray, button, false)) {
          if (button == vpMouseButton::button3) {
            quit = true;
          } else if (button == vpMouseButton::button1 && learn) {
            learn_position = true;
          } else if (button == vpMouseButton::button1 && auto_init && !learn) {
            run_auto_init = true;
          }
        }
      }
      if (use_depth) {
        vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
        vpDisplay::displayText(I_depth, 40, 20, "Click to quit", vpColor::red);
        vpDisplay::flush(I_depth);

        if (vpDisplay::getClick(I_depth, false)) {
          quit = true;
        }
      }

      if (learn_position) {
        // Detect keypoints on the current image
        std::vector<cv::KeyPoint> trainKeyPoints;
        keypoint.detect(I_gray, trainKeyPoints);

        // Keep only keypoints on the cube
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
        polygons = pair.first;
        roisPt = pair.second;

        // Compute the 3D coordinates
        std::vector<cv::Point3f> points3f;
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam_color, trainKeyPoints, polygons, roisPt, points3f);

        // Build the reference keypoints
        keypoint.buildReference(I_gray, trainKeyPoints, points3f, true, learn_id++);

        // Display learned data
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
          vpDisplay::displayCross(I_gray, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
        }
        learn_position = false;
        std::cout << "Data learned" << std::endl;
      }
      loop_t = vpTime::measureTimeMs() - t;
      times_vec.push_back(loop_t);
    }
    if (learn) {
      std::cout << "Save learning file: " << learning_data << std::endl;
      keypoint.saveLearningData(learning_data, true, true);
    }
  } catch (const vpException &e) {
    std::cout << "Catch an exception: " << e.what() << std::endl;
  }
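  // Report per-iteration processing-time statistics measured over the whole run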
  if (!times_vec.empty()) {
    std::cout << "\nProcessing time, Mean: " << vpMath::getMean(times_vec)
              << " ms ; Median: " << vpMath::getMedian(times_vec) << " ms ; Std: " << vpMath::getStdev(times_vec)
              << " ms" << std::endl;
  }

  return EXIT_SUCCESS;
}
#elif defined(VISP_HAVE_OCCIPITAL_STRUCTURE)
int main()
{
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#else
int main()
{
  std::cout << "Install libStructure 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#endif