Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
servoAfma6Points2DCamVelocityEyeToHand.cpp
1/*
2 * ViSP, open source Visual Servoing Platform software.
3 * Copyright (C) 2005 - 2024 by Inria. All rights reserved.
4 *
5 * This software is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 * See the file LICENSE.txt at the root directory of this source
10 * distribution for additional information about the GNU GPL.
11 *
12 * For using ViSP with software that can not be combined with the GNU
13 * GPL, please contact Inria about acquiring a ViSP Professional
14 * Edition License.
15 *
16 * See https://visp.inria.fr for more information.
17 *
18 * This software was developed at:
19 * Inria Rennes - Bretagne Atlantique
20 * Campus Universitaire de Beaulieu
21 * 35042 Rennes Cedex
22 * France
23 *
24 * If you have questions regarding the use of this file, please contact
25 * Inria at visp@inria.fr
26 *
27 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Description:
31 * tests the control law
32 * eye-to-hand control
33 * velocity computed in the camera frame
34 */
35
44
45#include <iostream>
46#include <visp3/core/vpConfig.h>
47
48#if defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY)
49
#include <visp3/blob/vpDot.h>
#include <visp3/blob/vpDot2.h>
#include <visp3/core/vpImage.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpPixelMeterConversion.h>
#include <visp3/gui/vpDisplayFactory.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/robot/vpRobotAfma6.h>
#include <visp3/sensor/vpRealSense2.h>
#include <visp3/vision/vpPose.h>
#include <visp3/visual_features/vpFeatureBuilder.h>
#include <visp3/visual_features/vpFeaturePoint.h>
#include <visp3/vs/vpServo.h>
#include <visp3/vs/vpServoDisplay.h>
62
63#define SAVE 0
64#define L 0.006
65#define D 0
66
67int main()
68{
69#ifdef ENABLE_VISP_NAMESPACE
70 using namespace VISP_NAMESPACE_NAME;
71#endif
72
73 try {
74 std::string username = vpIoTools::getUserName();
75 std::string logdirname = "/tmp/" + username;
76 if (SAVE) {
77 if (vpIoTools::checkDirectory(logdirname) == false) {
78 try {
79 // Create the dirname
80 vpIoTools::makeDirectory(logdirname);
81 }
82 catch (...) {
83 std::cerr << std::endl << "ERROR:" << std::endl;
84 std::cerr << " Cannot create " << logdirname << std::endl;
85 return EXIT_FAILURE;
86 }
87 }
88 }
89
90 vpRealSense2 rs;
91 rs2::config config;
92 unsigned int width = 640, height = 480, fps = 60;
93 config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
94 config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);
95 config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps);
96 rs.open(config);
97
98 // Warm up camera
100 for (size_t i = 0; i < 10; ++i) {
101 rs.acquire(I);
102 }
103
104 std::shared_ptr<vpDisplay> d = vpDisplayFactory::createDisplay(I, 100, 100, "Current image");
105
108
109 std::cout << "-------------------------------------------------------" << std::endl;
110 std::cout << " Test program for vpServo " << std::endl;
111 std::cout << " Eye-to-hand task control" << std::endl;
112 std::cout << " Simulation " << std::endl;
113 std::cout << " task : servo a point " << std::endl;
114 std::cout << "-------------------------------------------------------" << std::endl;
115
116 int nbPoint = 7;
117
118 vpDot dot[nbPoint];
119 vpImagePoint cog;
120
121 for (int i = 0; i < nbPoint; ++i) {
122 dot[i].initTracking(I);
123 dot[i].setGraphics(true);
124 dot[i].track(I);
126 dot[i].setGraphics(false);
127 }
128
129 // Compute the pose 3D model
130 vpPoint point[nbPoint];
131 point[0].setWorldCoordinates(-2 * L, D, -3 * L);
132 point[1].setWorldCoordinates(0, D, -3 * L);
133 point[2].setWorldCoordinates(2 * L, D, -3 * L);
134
135 point[3].setWorldCoordinates(-L, D, -L);
136 point[4].setWorldCoordinates(L, D, -L);
137 point[5].setWorldCoordinates(L, D, L);
138 point[6].setWorldCoordinates(-L, D, L);
139
140 vpRobotAfma6 robot;
142
143 // Get camera intrinsics
145 robot.getCameraParameters(cam, I);
146
147 vpHomogeneousMatrix c_M_o, cd_M_o;
148 vpPose pose;
149 pose.clearPoint();
150 for (int i = 0; i < nbPoint; ++i) {
151 cog = dot[i].getCog();
152 double x = 0, y = 0;
154 point[i].set_x(x);
155 point[i].set_y(y);
156 pose.addPoint(point[i]);
157 }
158
159 // compute the initial pose using Dementhon method followed by a non
160 // linear minimization method
162
163 std::cout << "c_M_o: \n" << c_M_o << std::endl;
164
165 /*
166 * Learning or reading the desired position
167 */
168 std::cout << "Learning (0/1)? " << std::endl;
169 std::string filename = "cd_M_o.dat";
170 int learning;
171 std::cin >> learning;
172 if (learning == 1) {
173 // save the object position
174 std::cout << "Save the location of the object cMo in " << filename << std::endl;
175 c_M_o.save(filename);
176 return EXIT_SUCCESS;
177 }
178
179 std::cout << "Loading desired location of the object cMo from " << filename << std::endl;
180 cd_M_o.load(filename);
181
182 vpFeaturePoint s[nbPoint], s_d[nbPoint];
183
184 // Set the desired position of the point by forward projection using the pose cd_M_o
185 for (int i = 0; i < nbPoint; ++i) {
186 vpColVector cP, p;
187 point[i].changeFrame(cd_M_o, cP);
188 point[i].projection(cP, p);
189
190 s_d[i].set_x(p[0]);
191 s_d[i].set_y(p[1]);
192 }
193
194 // Define the task
195 // - we want an eye-in-hand control law
196 // - robot is controlled in the camera frame
199 task.setInteractionMatrixType(vpServo::CURRENT);
200
201 // - we want to see a point on a point
202 for (int i = 0; i < nbPoint; ++i) {
203 task.addFeature(s[i], s_d[i]);
204 }
205
206 // - display task information
207 task.print();
208
209 double convergence_threshold = 0.00; // 025 ;
210
211 double error = 1;
212 unsigned int iter = 0;
213 robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
214
215 // position of the object in the effector frame
216 vpHomogeneousMatrix o_M_camrobot;
217 o_M_camrobot[0][3] = -0.05;
218
219 int it = 0;
220
221 std::list<vpImagePoint> Lcog;
222 bool quit = false;
223 while ((error > convergence_threshold) && (!quit)) {
224 std::cout << "---------------------------------------------" << iter++ << std::endl;
225
226 rs.acquire(I);
228
229 try {
230 for (int i = 0; i < nbPoint; ++i) {
231 dot[i].track(I);
232 Lcog.push_back(dot[i].getCog());
233 }
234 }
235 catch (...) {
236 std::cout << "Error detected while tracking visual features" << std::endl;
237 robot.stopMotion();
238 return EXIT_FAILURE;
239 }
240
241 // compute the initial pose using a non linear minimization method
242 pose.clearPoint();
243
244 for (int i = 0; i < nbPoint; ++i) {
245 double x = 0, y = 0;
246 cog = dot[i].getCog();
248 point[i].set_x(x);
249 point[i].set_y(y);
250
251 vpColVector cP;
252 point[i].changeFrame(cd_M_o, cP);
253
254 s[i].set_x(x);
255 s[i].set_y(y);
256 s[i].set_Z(cP[2]);
257
258 pose.addPoint(point[i]);
259
260 point[i].display(I, c_M_o, cam, vpColor::green);
261 point[i].display(I, cd_M_o, cam, vpColor::blue);
262 }
263 pose.computePose(vpPose::LOWE, c_M_o);
264
265 // - set the camera to end-effector velocity twist matrix transformation
266 vpHomogeneousMatrix c_M_e, camrobot_M_e;
267 robot.get_cMe(camrobot_M_e);
268 c_M_e = c_M_o * o_M_camrobot * camrobot_M_e;
269
270 task.set_cVe(c_M_e);
271
272 // - set the Jacobian (expressed in the end-effector frame)
273 vpMatrix e_J_e;
274 robot.get_eJe(e_J_e);
275 task.set_eJe(e_J_e);
276
277 // - set the task adaptive gain
278 vpAdaptiveGain lambda_adaptive;
279 lambda_adaptive.initStandard(1.7, 0.3, 1.5); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30
280 task.setLambda(lambda_adaptive);
281
282 vpColVector qdot = task.computeControlLaw();
283
284 // Display points trajectory
285 for (std::list<vpImagePoint>::const_iterator it_cog = Lcog.begin(); it_cog != Lcog.end(); ++it_cog) {
287 }
288
289 // Display task visual features feature
290 vpServoDisplay::display(task, cam, I);
291
292 // Apply joint velocity to the robot
293 robot.setVelocity(vpRobot::JOINT_STATE, qdot);
294
295 error = (task.getError()).sumSquare();
296 std::cout << "|| s - s* || = " << error << std::endl;
297
298 if (error > 7) {
299 std::cout << "Error detected while tracking visual features" << std::endl;
300 robot.stopMotion();
301 return EXIT_FAILURE;
302 }
303
304 if ((SAVE == 1) && (iter % 3 == 0)) {
306 vpDisplay::getImage(I, Ic);
307 std::string filename = vpIoTools::formatString(logdirname + "/image.%04d.png", it++);
308 vpImageIo::write(Ic, filename);
309 }
310
311 vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red);
312 if (vpDisplay::getClick(I, false)) {
313 quit = true;
314 }
315
317 }
318
319 return EXIT_SUCCESS;
320 }
321 catch (const vpException &e) {
322 std::cout << "Visual servo failed with exception: " << e << std::endl;
323 return EXIT_FAILURE;
324 }
325}
326
327#else
int main()
{
  // Fallback build: Afma6 robot, RealSense or display support is missing,
  // so there is nothing to servo — just tell the user and exit cleanly.
  const char *msg = "You do not have an afma6 robot connected to your computer...";
  std::cout << msg << std::endl;
  return EXIT_SUCCESS;
}
333
334#endif
Adaptive gain computation.
void initStandard(double gain_at_zero, double gain_at_infinity, double slope_at_zero)
@ TOOL_INTEL_D435_CAMERA
Definition vpAfma6.h:129
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
Implementation of column vector and the associated operations.
static const vpColor red
Definition vpColor.h:198
static const vpColor blue
Definition vpColor.h:204
static const vpColor green
Definition vpColor.h:201
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void getImage(const vpImage< unsigned char > &Is, vpImage< vpRGBa > &Id)
static void flush(const vpImage< unsigned char > &I)
static void displayPoint(const vpImage< unsigned char > &I, const vpImagePoint &ip, const vpColor &color, unsigned int thickness=1)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
This tracker is meant to track a dot (connected pixels with same gray level) on a vpImage.
Definition vpDot.h:123
void initTracking(const vpImage< unsigned char > &I)
Definition vpDot.cpp:630
void setGraphics(bool activate)
Definition vpDot.h:362
vpImagePoint getCog() const
Definition vpDot.h:255
void track(const vpImage< unsigned char > &I)
Definition vpDot.cpp:760
error that can be emitted by ViSP classes.
Definition vpException.h:60
Class that defines a 2D point visual feature which is composed by two parameters that are the cartes...
void set_y(double y)
void set_x(double x)
Implementation of an homogeneous matrix and operations on such kind of matrices.
void load(std::ifstream &f)
void save(std::ofstream &f) const
static void write(const vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition of the vpImage class member functions.
Definition vpImage.h:131
static bool checkDirectory(const std::string &dirname)
static std::string formatString(const std::string &name, unsigned int val)
static std::string getUserName()
static void makeDirectory(const std::string &dirname)
Implementation of a matrix and operations on matrices.
Definition vpMatrix.h:175
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
Definition vpPoint.h:79
void set_x(double x)
Set the point x coordinate in the image plane.
Definition vpPoint.cpp:471
void projection(const vpColVector &_cP, vpColVector &_p) const VP_OVERRIDE
Definition vpPoint.cpp:252
void display(const vpImage< unsigned char > &I, const vpCameraParameters &cam, const vpColor &color=vpColor::green, unsigned int thickness=1) VP_OVERRIDE
Definition vpPoint.cpp:387
void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const VP_OVERRIDE
Definition vpPoint.cpp:273
void setWorldCoordinates(double oX, double oY, double oZ)
Definition vpPoint.cpp:116
void set_y(double y)
Set the point y coordinate in the image plane.
Definition vpPoint.cpp:473
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
Definition vpPose.h:82
void addPoint(const vpPoint &P)
Definition vpPose.cpp:96
@ DEMENTHON_LAGRANGE_VIRTUAL_VS
Definition vpPose.h:103
@ LOWE
Definition vpPose.h:89
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, FuncCheckValidityPose func=nullptr)
Definition vpPose.cpp:385
void clearPoint()
Definition vpPose.cpp:89
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
Control of Irisa's gantry robot named Afma6.
void init(void)
@ JOINT_STATE
Definition vpRobot.h:79
@ STATE_VELOCITY_CONTROL
Initialize the velocity controller.
Definition vpRobot.h:64
static void display(const vpServo &s, const vpCameraParameters &cam, const vpImage< unsigned char > &I, vpColor currentColor=vpColor::green, vpColor desiredColor=vpColor::red, unsigned int thickness=1)
@ EYETOHAND_L_cVe_eJe
Definition vpServo.h:190
@ CURRENT
Definition vpServo.h:217
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.