Visual Servoing Platform version 3.6.0
servoAfma6Points2DCamVelocityEyeToHand.cpp
/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * tests the control law
 * eye-to-hand control
 * velocity computed in the camera frame
 *
*****************************************************************************/
#include <cmath>   // std::fabs
#include <fstream> // std::ifstream, std::ofstream
#include <iomanip> // std::setfill, std::setw
#include <iostream>
#include <limits> // numeric_limits
#include <list>
#include <sstream> // std::stringstream
#include <stdlib.h>
#include <visp3/core/vpConfig.h>
#include <visp3/core/vpDebug.h> // Debug trace
#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394))

#include <visp3/blob/vpDot.h>
#include <visp3/core/vpCameraParameters.h>
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpException.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/core/vpImagePoint.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpMath.h>
#include <visp3/core/vpPixelMeterConversion.h>
#include <visp3/core/vpPoint.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/robot/vpRobotAfma6.h>
#include <visp3/sensor/vp1394TwoGrabber.h>
#include <visp3/vision/vpPose.h>
#include <visp3/visual_features/vpFeatureBuilder.h>
#include <visp3/visual_features/vpFeaturePoint.h>
#include <visp3/vs/vpServo.h>
#include <visp3/vs/vpServoDisplay.h>

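// SAVE : when set to 1, snapshots of the display are written to /tmp/<username>
// L    : half the spacing (in meters) between adjacent dots of the 3D target defined below
// D    : y-coordinate (in meters) of the dots in the object frame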
#define SAVE 0
#define L 0.006
#define D 0

int main()
{
  try {
    std::string username = vpIoTools::getUserName();
    std::string logdirname = "/tmp/" + username;
    if (SAVE) {
      if (vpIoTools::checkDirectory(logdirname) == false) {
        try {
          // Create the dirname
          vpIoTools::makeDirectory(logdirname);
        } catch (...) {
          std::cerr << std::endl << "ERROR:" << std::endl;
          std::cerr << "  Cannot create " << logdirname << std::endl;
          return EXIT_FAILURE;
        }
      }
    }
    vpServo task;

    vpImage<unsigned char> I;
    int i;

    vp1394TwoGrabber g;
    g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8); // assumed typical video mode for this setup
    g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60);             // assumed typical framerate
    g.open(I);

    g.acquire(I);

#ifdef VISP_HAVE_X11
    vpDisplayX display(I, 100, 100, "Current image");
#elif defined(HAVE_OPENCV_HIGHGUI)
    vpDisplayOpenCV display(I, 100, 100, "Current image");
#elif defined(VISP_HAVE_GTK)
    vpDisplayGTK display(I, 100, 100, "Current image");
#endif

    vpDisplay::display(I);
    vpDisplay::flush(I);

    std::cout << std::endl;
    std::cout << "-------------------------------------------------------" << std::endl;
    std::cout << " Test program for vpServo " << std::endl;
    std::cout << " Eye-to-hand task control" << std::endl;
    std::cout << " Use of the Afma6 robot " << std::endl;
    std::cout << " task : servo a point " << std::endl;
    std::cout << "-------------------------------------------------------" << std::endl;
    std::cout << std::endl;

    const int nbPoint = 7;

    vpDot dot[nbPoint];
    vpImagePoint cog;

    // Initialize the tracking of the 7 dots: click on each dot in the image
    for (i = 0; i < nbPoint; i++) {
      dot[i].initTracking(I);
      dot[i].setGraphics(true);
      dot[i].track(I);
      vpDisplay::flush(I);
      dot[i].setGraphics(false);
    }

    // 3D model of the object: coordinates of the dots used for pose computation
    vpPoint point[nbPoint];
    point[0].setWorldCoordinates(-2 * L, D, -3 * L);
    point[1].setWorldCoordinates(0, D, -3 * L);
    point[2].setWorldCoordinates(2 * L, D, -3 * L);

    point[3].setWorldCoordinates(-L, D, -L);
    point[4].setWorldCoordinates(L, D, -L);
    point[5].setWorldCoordinates(L, D, L);
    point[6].setWorldCoordinates(-L, D, L);

    vpRobotAfma6 robot;
    // Update camera parameters
    vpCameraParameters cam;
    robot.getCameraParameters(cam, I);

    vpHomogeneousMatrix cMo, cdMo;
    vpPose pose;
    pose.clearPoint();
    for (i = 0; i < nbPoint; i++) {
      cog = dot[i].getCog();
      double x = 0, y = 0;
      vpPixelMeterConversion::convertPoint(cam, cog, x, y); // pixel to normalized image-plane coordinates
      point[i].set_x(x);
      point[i].set_y(y);
      pose.addPoint(point[i]);
    }


    // Compute the initial pose using the Dementhon and Lagrange linear methods
    // followed by a non linear minimization method
    pose.computePose(vpPose::DEMENTHON_LAGRANGE_VIRTUAL_VS, cMo);

    std::cout << cMo << std::endl;
    cMo.print();

    /*------------------------------------------------------------------
      -- Learning the desired position
      -- or reading the desired position
      ------------------------------------------------------------------
    */
    std::cout << " Learning 0/1 " << std::endl;
    std::string name = "cdMo.dat";
    int learning;
    std::cin >> learning;
    if (learning == 1) {
      // Save the current object location that will later be used as the desired pose
      vpTRACE("Save the location of the object in a file cdMo.dat");
      std::ofstream f(name.c_str());
      cMo.save(f);
      f.close();
      return EXIT_SUCCESS; // quit after learning; run the program again to servo
    }

    {
      vpTRACE("Loading desired location from cdMo.dat");
      std::ifstream f("cdMo.dat");
      cdMo.load(f);
      f.close();
    }

    vpFeaturePoint p[nbPoint], pd[nbPoint];

    // set the desired position of the point by forward projection using
    // the pose cdMo
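    // (vpPoint::projection() stores the normalized image-plane coordinates x = X/Z and y = Y/Z
    //  of the point expressed in the camera frame)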
    for (i = 0; i < nbPoint; i++) {
      vpColVector cP, p;
      point[i].changeFrame(cdMo, cP);
      point[i].projection(cP, p);

      pd[i].set_x(p[0]);
      pd[i].set_y(p[1]);
    }

    //------------------------------------------------------------------

    vpTRACE("define the task");
    vpTRACE("\t we want an eye-to-hand control law");
    vpTRACE("\t robot is controlled in the camera frame");
    task.setServo(vpServo::EYETOHAND_L_cVe_eJe);
    task.setInteractionMatrixType(vpServo::CURRENT);

    for (i = 0; i < nbPoint; i++) {
      task.addFeature(p[i], pd[i]);
    }

    vpTRACE("Display task information ");
    task.print();

    //------------------------------------------------------------------

    double convergence_threshold = 0.00; // 0.025
    vpDisplay::getClick(I); // wait for a user click before starting the servo

    //-------------------------------------------------------------
    double error = 1;
    unsigned int iter = 0;
    vpTRACE("\t loop");
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
    vpColVector v; // computed robot velocity

    // position of the object in the effector frame
    vpHomogeneousMatrix oMcamrobot;
    oMcamrobot[0][3] = -0.05;

    vpImage<vpRGBa> Ic; // color copy of the display, used when SAVE is enabled
    int it = 0;

    double lambda_av = 0.1;
    double alpha = 1;
    double beta = 3;

    std::cout << "alpha (suggested: 0.7)" << std::endl;
    std::cin >> alpha;
    std::cout << "beta (suggested: 5)" << std::endl;
    std::cin >> beta;
    std::list<vpImagePoint> Lcog;
    vpImagePoint ip;
    while (error > convergence_threshold) {
      std::cout << "---------------------------------------------" << iter++ << std::endl;

      g.acquire(I);
      vpDisplay::display(I);
      ip.set_i(265);
      ip.set_j(150);
      vpDisplay::displayText(I, ip, "Eye-To-Hand Visual Servoing", vpColor::green);
      ip.set_i(280);
      ip.set_j(150);
      vpDisplay::displayText(I, ip, "IRISA-INRIA Rennes, Lagadic project", vpColor::green);
      try {
        for (i = 0; i < nbPoint; i++) {
          dot[i].track(I);
          Lcog.push_back(dot[i].getCog());
        }
      } catch (...) {
        vpTRACE("Error detected while tracking visual features");
        robot.stopMotion();
        exit(1);
      }

      // Compute the current object pose from the tracked dots using a non linear minimization method
      pose.clearPoint();

      for (i = 0; i < nbPoint; i++) {
        double x = 0, y = 0;
        cog = dot[i].getCog();
        vpPixelMeterConversion::convertPoint(cam, cog, x, y);
        point[i].set_x(x);
        point[i].set_y(y);

        vpColVector cP;
        point[i].changeFrame(cdMo, cP);

        p[i].set_x(x);
        p[i].set_y(y);
        p[i].set_Z(cP[2]);

        pose.addPoint(point[i]);

        point[i].display(I, cMo, cam, vpColor::green);
        point[i].display(I, cdMo, cam, vpColor::blue);
      }
      pose.computePose(vpPose::LOWE, cMo);
      vpDisplay::flush(I);

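      // Chain the transforms to express the end-effector in the observing (eye-to-hand) camera frame:
      // cMe = cMo (object in the observing camera) * oMcamrobot (on-board camera in the object frame)
      //       * camrobotMe (end-effector in the on-board camera frame, given by robot.get_cMe())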
      vpHomogeneousMatrix cMe, camrobotMe;
      robot.get_cMe(camrobotMe);
      cMe = cMo * oMcamrobot * camrobotMe;

      task.set_cVe(cMe);

      vpMatrix eJe;
      robot.get_eJe(eJe);
      task.set_eJe(eJe);

      // Compute the adaptive gain (speed up the convergence)
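      // gain = alpha * exp(-beta * ||s - s*||^2) + lambda_av: it tends to lambda_av while the error
      // is large and to alpha + lambda_av close to convergence, which speeds up the final convergence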
      double gain;
      if (iter > 2) {
        if (std::fabs(alpha) <= std::numeric_limits<double>::epsilon())
          gain = lambda_av;
        else {
          gain = alpha * exp(-beta * (task.getError()).sumSquare()) + lambda_av;
        }
      } else
        gain = lambda_av;
      if (SAVE == 1)
        gain = gain / 5;

      vpTRACE("%f %f %f %f %f", alpha, beta, lambda_av, (task.getError()).sumSquare(), gain);
      task.setLambda(gain);

      v = task.computeControlLaw();

      // display points trajectory
      for (std::list<vpImagePoint>::const_iterator it_cog = Lcog.begin(); it_cog != Lcog.end(); ++it_cog) {
        vpDisplay::displayPoint(I, *it_cog, vpColor::red);
      }
      vpServoDisplay::display(task, cam, I);
      // Send the control law output to the robot: with the EYETOHAND_L_cVe_eJe scheme this is a joint velocity
      robot.setVelocity(vpRobot::ARTICULAR_FRAME, v);

      error = (task.getError()).sumSquare();
      std::cout << "|| s - s* || = " << error << std::endl;

      if (error > 7) {
        vpTRACE("Task error too large: the servo is diverging, stop the robot");
        robot.stopMotion();
        exit(1);
      }

      // Display the current and desired pose (uncomment if needed)
      // pose.display(I, cMo, cam, 0.04, vpColor::red);
      // pose.display(I, cdMo, cam, 0.04, vpColor::blue);
      if ((SAVE == 1) && (iter % 3 == 0)) {
        vpDisplay::getImage(I, Ic);
        std::stringstream ss;
        ss << logdirname;
        ss << "/image.";
        ss << std::setfill('0') << std::setw(4);
        ss << it++;
        ss << ".ppm";
        vpImageIo::write(Ic, ss.str());
      }
    }
    v = 0;
    robot.setVelocity(vpRobot::ARTICULAR_FRAME, v); // stop the robot at the end of the servo

    return EXIT_SUCCESS;
  } catch (const vpException &e) {
    std::cout << "Test failed with exception: " << e << std::endl;
    return EXIT_FAILURE;
  }
}

#else
int main()
{
  std::cout << "You do not have an Afma6 robot connected to your computer or libdc1394 is not available..." << std::endl;
  return EXIT_SUCCESS;
}

#endif
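The core of this example is the eye-to-hand configuration of vpServo: the task error is built from the current and desired point features, and the control law is mapped to joint velocities through the camera-to-end-effector twist transform (set_cVe) and the robot Jacobian (set_eJe), both refreshed at every iteration. The sketch below, written against the same ViSP API, isolates these calls; the helper functions and their decomposition are illustrative, not part of ViSP, and assume the features, the cMe transform and the eJe Jacobian are computed elsewhere as in the example above.

#include <visp3/core/vpColVector.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpMatrix.h>
#include <visp3/core/vpVelocityTwistMatrix.h>
#include <visp3/visual_features/vpFeaturePoint.h>
#include <visp3/vs/vpServo.h>

// Illustrative helper: configure the eye-to-hand task once, before the servo loop.
void configureEyeToHandTask(vpServo &task, vpFeaturePoint *p, vpFeaturePoint *pd, unsigned int n, double lambda)
{
  task.setServo(vpServo::EYETOHAND_L_cVe_eJe);     // interaction model L * cVe * eJe
  task.setInteractionMatrixType(vpServo::CURRENT); // use the current interaction matrix
  task.setLambda(lambda);
  for (unsigned int i = 0; i < n; ++i)
    task.addFeature(p[i], pd[i]); // the task error s - s* is built from these feature pairs
}

// Illustrative helper: at each iteration, refresh cVe and eJe, then compute the command.
vpColVector computeJointVelocity(vpServo &task, const vpHomogeneousMatrix &cMe, const vpMatrix &eJe)
{
  task.set_cVe(vpVelocityTwistMatrix(cMe)); // twist transform that expresses end-effector velocities in the camera frame
  task.set_eJe(eJe);                        // robot Jacobian expressed in the end-effector frame
  return task.computeControlLaw();          // joint-velocity command
}

With the EYETOHAND_L_cVe_eJe scheme the vector returned by computeControlLaw() has the dimension of the joint space, so it is meant to be applied with vpRobot::ARTICULAR_FRAME rather than as a Cartesian camera velocity.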