2#include <visp3/core/vpConfig.h>
3#ifdef VISP_HAVE_MODULE_SENSOR
4#include <visp3/sensor/vp1394CMUGrabber.h>
5#include <visp3/sensor/vp1394TwoGrabber.h>
6#include <visp3/sensor/vpFlyCaptureGrabber.h>
7#include <visp3/sensor/vpRealSense2.h>
8#include <visp3/sensor/vpV4l2Grabber.h>
10#include <visp3/core/vpIoTools.h>
11#include <visp3/core/vpXmlParserCamera.h>
12#include <visp3/gui/vpDisplayGDI.h>
13#include <visp3/gui/vpDisplayOpenCV.h>
14#include <visp3/gui/vpDisplayX.h>
15#include <visp3/io/vpImageIo.h>
16#include <visp3/vision/vpKeyPoint.h>
18#include <visp3/mbt/vpMbGenericTracker.h>
21#if defined(HAVE_OPENCV_VIDEOIO)
22#include <opencv2/videoio.hpp>
34int main(
int argc,
char **argv)
36#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_VIDEOIO) && \
37 (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
38 defined(HAVE_OPENCV_HIGHGUI) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2))
41 std::string opt_modelname =
"model/teabox/teabox.cao";
44 double opt_proj_error_threshold = 30.;
45 bool opt_use_ogre =
false;
46 bool opt_use_scanline =
false;
47 bool opt_display_projection_error =
false;
48 bool opt_learn =
false;
49 bool opt_auto_init =
false;
50 std::string opt_learning_data =
"learning/data-learned.bin";
51 std::string opt_intrinsic_file =
"";
52 std::string opt_camera_name =
"";
54 for (
int i = 0; i < argc; i++) {
55 if (std::string(argv[i]) ==
"--model") {
56 opt_modelname = std::string(argv[i + 1]);
58 else if (std::string(argv[i]) ==
"--tracker") {
59 opt_tracker = atoi(argv[i + 1]);
61 else if (std::string(argv[i]) ==
"--camera_device" && i + 1 < argc) {
62 opt_device = atoi(argv[i + 1]);
64 else if (std::string(argv[i]) ==
"--max_proj_error") {
65 opt_proj_error_threshold = atof(argv[i + 1]);
67 else if (std::string(argv[i]) ==
"--use_ogre") {
70 else if (std::string(argv[i]) ==
"--use_scanline") {
71 opt_use_scanline =
true;
73 else if (std::string(argv[i]) ==
"--learn") {
76 else if (std::string(argv[i]) ==
"--learning_data" && i + 1 < argc) {
77 opt_learning_data = argv[i + 1];
79 else if (std::string(argv[i]) ==
"--auto_init") {
82 else if (std::string(argv[i]) ==
"--display_proj_error") {
83 opt_display_projection_error =
true;
85 else if (std::string(argv[i]) ==
"--intrinsic" && i + 1 < argc) {
86 opt_intrinsic_file = std::string(argv[i + 1]);
88 else if (std::string(argv[i]) ==
"--camera_name" && i + 1 < argc) {
89 opt_camera_name = std::string(argv[i + 1]);
91 else if (std::string(argv[i]) ==
"--help" || std::string(argv[i]) ==
"-h") {
93 <<
"\nUsage: " << argv[0] <<
" [--camera_device <camera device> (default: 0)]"
94 <<
" [--intrinsic <intrinsic file> (default: empty)]"
95 <<
" [--camera_name <camera name> (default: empty)]"
96 <<
" [--model <model name> (default: teabox)]"
97 <<
" [--tracker <0=egde|1=keypoint|2=hybrid> (default: 2)]"
98 <<
" [--use_ogre] [--use_scanline]"
99 <<
" [--max_proj_error <allowed projection error> (default: 30)]"
100 <<
" [--learn] [--auto_init] [--learning_data <data-learned.bin> (default: learning/data-learned.bin)]"
101 <<
" [--display_proj_error]"
102 <<
" [--help] [-h]\n"
110 if (!parentname.empty())
111 objectname = parentname +
"/" + objectname;
113 std::cout <<
"Tracker requested config files: " << objectname <<
".[init, cao]" << std::endl;
114 std::cout <<
"Tracker optional config files: " << objectname <<
".[ppm]" << std::endl;
116 std::cout <<
"Tracked features: " << std::endl;
117 std::cout <<
" Use edges : " << (opt_tracker == 0 || opt_tracker == 2) << std::endl;
118 std::cout <<
" Use klt : " << (opt_tracker == 1 || opt_tracker == 2) << std::endl;
119 std::cout <<
"Tracker options: " << std::endl;
120 std::cout <<
" Use ogre : " << opt_use_ogre << std::endl;
121 std::cout <<
" Use scanline: " << opt_use_scanline << std::endl;
122 std::cout <<
" Proj. error : " << opt_proj_error_threshold << std::endl;
123 std::cout <<
" Display proj. error: " << opt_display_projection_error << std::endl;
124 std::cout <<
"Config files: " << std::endl;
125 std::cout <<
" Config file : "
126 <<
"\"" << objectname +
".xml"
127 <<
"\"" << std::endl;
128 std::cout <<
" Model file : "
129 <<
"\"" << objectname +
".cao"
130 <<
"\"" << std::endl;
131 std::cout <<
" Init file : "
132 <<
"\"" << objectname +
".init"
133 <<
"\"" << std::endl;
134 std::cout <<
"Learning options : " << std::endl;
135 std::cout <<
" Learn : " << opt_learn << std::endl;
136 std::cout <<
" Auto init : " << opt_auto_init << std::endl;
137 std::cout <<
" Learning data: " << opt_learning_data << std::endl;
140#if VISP_VERSION_INT > VP_VERSION_INT(3, 2, 0)
152 if (!opt_intrinsic_file.empty() && !opt_camera_name.empty())
160#if defined(VISP_HAVE_V4L2)
162 std::ostringstream device;
163 device <<
"/dev/video" << opt_device;
164 std::cout <<
"Use Video 4 Linux grabber on device " << device.str() << std::endl;
168#elif defined(VISP_HAVE_DC1394)
170 std::cout <<
"Use DC1394 grabber" << std::endl;
173#elif defined(VISP_HAVE_CMU1394)
175 std::cout <<
"Use CMU1394 grabber" << std::endl;
178#elif defined(VISP_HAVE_FLYCAPTURE)
180 std::cout <<
"Use FlyCapture grabber" << std::endl;
183#elif defined(VISP_HAVE_REALSENSE2)
185 std::cout <<
"Use Realsense 2 grabber" << std::endl;
188 config.disable_stream(RS2_STREAM_DEPTH);
189 config.disable_stream(RS2_STREAM_INFRARED);
190 config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
194 std::cout <<
"Read camera parameters from Realsense device" << std::endl;
196#elif defined(HAVE_OPENCV_VIDEOIO)
197 std::cout <<
"Use OpenCV grabber on device " << opt_device << std::endl;
198 cv::VideoCapture g(opt_device);
200 std::cout <<
"Failed to open the camera" << std::endl;
210#if defined(VISP_HAVE_X11)
212#elif defined(VISP_HAVE_GDI)
214#elif defined(HAVE_OPENCV_HIGHGUI)
217 display->init(I, 100, 100,
"Model-based tracker");
220#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
221 defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
223#elif defined(HAVE_OPENCV_VIDEOIO)
239 if (opt_tracker == 0)
241#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
242 else if (opt_tracker == 1)
248#if !defined(VISP_HAVE_MODULE_KLT)
249 std::cout <<
"klt and hybrid model-based tracker are not available since visp_klt module is not available. "
250 "In CMakeGUI turn visp_klt module ON, configure and build ViSP again."
253 std::cout <<
"Hybrid tracking is impossible since OpenCV is not enabled. "
254 <<
"Install OpenCV, configure and build ViSP again to run this tutorial." << std::endl;
271 if (opt_tracker == 0 || opt_tracker == 2) {
286#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
287 if (opt_tracker == 1 || opt_tracker == 2) {
322#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
323 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
324 std::string detectorName =
"SIFT";
325 std::string extractorName =
"SIFT";
326 std::string matcherName =
"BruteForce";
328 std::string detectorName =
"FAST";
329 std::string extractorName =
"ORB";
330 std::string matcherName =
"BruteForce-Hamming";
333 if (opt_learn || opt_auto_init) {
337#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
338#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
339 keypoint.setDetectorParameter(
"ORB",
"nLevels", 1);
341 cv::Ptr<cv::ORB> orb_detector = keypoint.
getDetector(
"ORB").dynamicCast<cv::ORB>();
343 orb_detector->setNLevels(1);
351 std::cout <<
"Cannot enable auto detection. Learning file \"" << opt_learning_data <<
"\" doesn't exist"
358 tracker.
initClick(I, objectname +
".init",
true);
361 bool learn_position =
false;
362 bool run_auto_init =
false;
364 run_auto_init =
true;
369 unsigned int learn_cpt = 0;
371 bool tracking_failed =
false;
375#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
376 defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
378#elif defined(HAVE_OPENCV_VIDEOIO)
386 tracking_failed =
false;
388 std::cout <<
"Auto init succeed" << std::endl;
396 else if (tracking_failed) {
398 tracking_failed =
false;
399 tracker.
initClick(I, objectname +
".init",
true);
408 run_auto_init =
false;
414 tracking_failed =
true;
416 std::cout <<
"Tracker needs to restart (tracking exception)" << std::endl;
417 run_auto_init =
true;
421 if (!tracking_failed) {
422 double proj_error = 0;
432 if (proj_error > opt_proj_error_threshold) {
433 std::cout <<
"Tracker needs to restart (projection error detected: " << proj_error <<
")" << std::endl;
435 run_auto_init =
true;
437 tracking_failed =
true;
441 if (!tracking_failed) {
454 std::stringstream ss;
455 ss <<
"Translation: " << std::setprecision(5) << pose[0] <<
" " << pose[1] <<
" " << pose[2] <<
" [m]";
458 ss <<
"Rotation tu: " << std::setprecision(4) <<
vpMath::deg(pose[3]) <<
" " <<
vpMath::deg(pose[4]) <<
" "
463 std::stringstream ss;
469 if (learn_position) {
472 std::vector<cv::KeyPoint> trainKeyPoints;
473 keypoint.
detect(I, trainKeyPoints);
476 std::vector<vpPolygon> polygons;
477 std::vector<std::vector<vpPoint> > roisPt;
478 std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.
getPolygonFaces();
479 polygons = pair.first;
480 roisPt = pair.second;
483 std::vector<cv::Point3f> points3f;
487 keypoint.
buildReference(I, trainKeyPoints, points3f,
true, learn_id++);
490 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
493 learn_position =
false;
494 std::cout <<
"Data learned" << std::endl;
497 std::stringstream ss;
502 else if (opt_auto_init)
513 learn_position =
true;
516 run_auto_init =
true;
522 if (opt_learn && learn_cpt) {
523 std::cout <<
"Save learning from " << learn_cpt <<
" images in file: " << opt_learning_data << std::endl;
532 std::cout <<
"Catch a ViSP exception: " << e << std::endl;
534#elif defined(VISP_HAVE_OPENCV)
537 std::cout <<
"Install a 3rd party dedicated to frame grabbing (dc1394, cmu1394, v4l2, OpenCV, FlyCapture, "
538 "Realsense2), configure and build ViSP again to use this example"
543 std::cout <<
"Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
Video capture for Firewire cameras based on the CMU 1394 Digital Camera SDK.
void open(vpImage< unsigned char > &I)
Class for firewire ieee1394 video devices using libdc1394-2.x api.
void open(vpImage< unsigned char > &I)
Generic class defining intrinsic camera parameters.
void initPersProjWithoutDistortion(double px, double py, double u0, double v0)
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
static const vpColor none
static const vpColor yellow
static const vpColor green
Display for Windows using GDI (available on any Windows 32 platform).
The vpDisplayOpenCV class allows displaying images using the OpenCV library. Thus to enable this class OpenC...
Use the X11 console to display images on Unix-like operating systems. Thus to enable this class X11 should be instal...
Class that defines generic functionalities for display.
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
Error that can be emitted by ViSP classes.
const std::string & getStringMessage() const
void open(vpImage< unsigned char > &I)
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Definition of the vpImage class member functions.
Class that allows keypoints detection (and descriptors extraction) and matching thanks to OpenCV libr...
unsigned int matchPoint(const vpImage< unsigned char > &I)
void setExtractor(const vpFeatureDescriptorType &extractorType)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
void setMatcher(const std::string &matcherName)
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
void setDetector(const vpFeatureDetectorType &detectorType)
cv::Ptr< cv::FeatureDetector > getDetector(const vpFeatureDetectorType &type) const
unsigned int buildReference(const vpImage< unsigned char > &I)
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this ...
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double deg(double rad)
Real-time 6D object pose tracking using its CAD model.
virtual void setCameraParameters(const vpCameraParameters &camera)
virtual void getPose(vpHomogeneousMatrix &cMo) const
virtual void setDisplayFeatures(bool displayF)
virtual int getTrackerType() const
virtual void setKltMaskBorder(const unsigned int &e)
virtual void setProjectionErrorComputation(const bool &flag)
virtual unsigned int getNbFeaturesEdge() const
virtual void initFromPose(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo)
virtual unsigned int getNbFeaturesKlt() const
virtual void getCameraParameters(vpCameraParameters &camera) const
virtual void setMovingEdge(const vpMe &me)
virtual void setScanLineVisibilityTest(const bool &v)
virtual void setKltOpencv(const vpKltOpencv &t)
virtual std::pair< std::vector< vpPolygon >, std::vector< std::vector< vpPoint > > > getPolygonFaces(bool orderPolygons=true, bool useVisibility=true, bool clipPolygon=false)
virtual void setProjectionErrorDisplay(bool display)
virtual void setTrackerType(int type)
virtual void initClick(const vpImage< unsigned char > &I1, const vpImage< unsigned char > &I2, const std::string &initFile1, const std::string &initFile2, bool displayHelp=false, const vpHomogeneousMatrix &T1=vpHomogeneousMatrix(), const vpHomogeneousMatrix &T2=vpHomogeneousMatrix())
virtual double computeCurrentProjectionError(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &_cMo, const vpCameraParameters &_cam)
virtual void loadConfigFile(const std::string &configFile, bool verbose=true)
virtual void setOgreVisibilityTest(const bool &v)
virtual void loadModel(const std::string &modelFile, bool verbose=false, const vpHomogeneousMatrix &T=vpHomogeneousMatrix())
virtual void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, unsigned int thickness=1, bool displayFullModel=false)
virtual void track(const vpImage< unsigned char > &I)
virtual double getProjectionError() const
void setMu1(const double &mu_1)
void setSampleStep(const double &s)
void setRange(const unsigned int &r)
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
void setMaskSize(const unsigned int &a)
void setMu2(const double &mu_2)
@ NORMALIZED_THRESHOLD
Easy-to-use normalized likelihood threshold corresponding to the minimal luminance contrast to consid...
void setMaskNumber(const unsigned int &a)
void setThreshold(const double &t)
Implementation of a pose vector and operations on poses.
void acquire(vpImage< unsigned char > &grey, double *ts=NULL)
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
bool open(const rs2::config &cfg=rs2::config())
Class that is a wrapper over the Video4Linux2 (V4L2) driver.
void open(vpImage< unsigned char > &I)
void setScale(unsigned scale=vpV4l2Grabber::DEFAULT_SCALE)
void setDevice(const std::string &devname)
XML parser to load and save intrinsic camera parameters.
int parse(vpCameraParameters &cam, const std::string &filename, const std::string &camera_name, const vpCameraParameters::vpCameraParametersProjType &projModel, unsigned int image_width=0, unsigned int image_height=0, bool verbose=true)
VISP_EXPORT double measureTimeMs()