Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
servoAfma6AprilTagPBVS.cpp
1/*
2 * ViSP, open source Visual Servoing Platform software.
3 * Copyright (C) 2005 - 2024 by Inria. All rights reserved.
4 *
5 * This software is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 * See the file LICENSE.txt at the root directory of this source
10 * distribution for additional information about the GNU GPL.
11 *
12 * For using ViSP with software that can not be combined with the GNU
13 * GPL, please contact Inria about acquiring a ViSP Professional
14 * Edition License.
15 *
16 * See https://visp.inria.fr for more information.
17 *
18 * This software was developed at:
19 * Inria Rennes - Bretagne Atlantique
20 * Campus Universitaire de Beaulieu
21 * 35042 Rennes Cedex
22 * France
23 *
24 * If you have questions regarding the use of this file, please contact
25 * Inria at visp@inria.fr
26 *
27 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Description:
31 * Pose-based visual servoing (PBVS) with an Afma6 robot, an AprilTag target and a RealSense RGB-D sensor.
32 */
33
48
49#include <iostream>
50
51#include <visp3/core/vpConfig.h>
52
53#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) && defined(VISP_HAVE_AFMA6)
54
55#include <visp3/core/vpCameraParameters.h>
56#include <visp3/detection/vpDetectorAprilTag.h>
57#include <visp3/gui/vpDisplayFactory.h>
58#include <visp3/gui/vpPlot.h>
59#include <visp3/io/vpImageIo.h>
60#include <visp3/robot/vpRobotAfma6.h>
61#include <visp3/sensor/vpRealSense2.h>
62#include <visp3/visual_features/vpFeatureThetaU.h>
63#include <visp3/visual_features/vpFeatureTranslation.h>
64#include <visp3/vs/vpServo.h>
65#include <visp3/vs/vpServoDisplay.h>
66
67#ifdef ENABLE_VISP_NAMESPACE
68using namespace VISP_NAMESPACE_NAME;
69#endif
70
71void display_point_trajectory(const vpImage<unsigned char> &I,
72 const std::vector<vpImagePoint> &vip,
73 std::vector<vpImagePoint> *traj_vip)
74{
75 for (size_t i = 0; i < vip.size(); ++i) {
76 if (traj_vip[i].size()) {
77 // Add the point only if distance with the previous > 1 pixel
78 if (vpImagePoint::distance(vip[i], traj_vip[i].back()) > 1.) {
79 traj_vip[i].push_back(vip[i]);
80 }
81 }
82 else {
83 traj_vip[i].push_back(vip[i]);
84 }
85 }
86 for (size_t i = 0; i < vip.size(); ++i) {
87 for (size_t j = 1; j < traj_vip[i].size(); ++j) {
88 vpDisplay::displayLine(I, traj_vip[i][j - 1], traj_vip[i][j], vpColor::green, 2);
89 }
90 }
91}
92
93int main(int argc, char **argv)
94{
 // Eye-in-hand pose-based visual servoing (PBVS) example: an Afma6 gantry
 // robot carrying an Intel RealSense camera is servoed so that the camera
 // reaches a desired pose w.r.t. an AprilTag target. Command-line flags
 // tune the tag size, detector decimation, gain law, plotting, task
 // sequencing and the convergence thresholds.
 // NOTE(review): this listing was extracted with some source lines
 // missing (e.g. the declarations of I, cam, tagFamily,
 // poseEstimationMethod, the vpServo task, the visual features and the
 // mouse button variable). Each gap is flagged below -- confirm against
 // the original example file.
 // --- Default settings, overridable from the command line ---
95 double opt_tagSize = 0.120;
96 int opt_quad_decimate = 2;
97 bool opt_verbose = false;
98 bool opt_plot = false;
99 bool opt_adaptive_gain = false;
100 bool opt_task_sequencing = false;
101 double opt_convergence_threshold_t = 0.0005; // Value in [m]
102 double opt_convergence_threshold_tu = 0.5; // Value in [deg]
103
104 bool display_tag = true;
105
 // --- Command-line parsing ---
106 for (int i = 1; i < argc; ++i) {
107 if ((std::string(argv[i]) == "--tag-size") && (i + 1 < argc)) {
108 opt_tagSize = std::stod(argv[++i]);
109 }
110 else if ((std::string(argv[i]) == "--tag-quad-decimate") && (i + 1 < argc)) {
111 opt_quad_decimate = std::stoi(argv[++i]);
112 }
113 else if (std::string(argv[i]) == "--verbose") {
114 opt_verbose = true;
115 }
116 else if (std::string(argv[i]) == "--plot") {
117 opt_plot = true;
118 }
119 else if (std::string(argv[i]) == "--adaptive-gain") {
120 opt_adaptive_gain = true;
121 }
122 else if (std::string(argv[i]) == "--task-sequencing") {
123 opt_task_sequencing = true;
124 }
125 else if (std::string(argv[i]) == "--no-convergence-threshold") {
126 opt_convergence_threshold_t = 0.;
127 opt_convergence_threshold_tu = 0.;
128 }
129 else if ((std::string(argv[i]) == "--help") || (std::string(argv[i]) == "-h")) {
130 std::cout
131 << argv[0]
132 << " [--tag-size <marker size in meter; default " << opt_tagSize << ">]"
133 << " [--tag-quad-decimate <decimation; default " << opt_quad_decimate << ">]"
134 << " [--adaptive-gain]"
135 << " [--plot]"
136 << " [--task-sequencing]"
137 << " [--no-convergence-threshold]"
138 << " [--verbose]"
139 << " [--help] [-h]"
140 << std::endl;
141 return EXIT_SUCCESS;
142 }
143 }
144
145 vpRobotAfma6 robot;
 // NOTE(review): a source line is missing here (146) -- presumably it
 // declared projModel (likely vpCameraParameters::perspectiveProjWithDistortion,
 // per the comment below). Confirm against the original file.
147
148 // Load the end-effector to camera frame transformation obtained
149 // using a camera intrinsic model with distortion
150 robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, projModel);
151
152 try {
153 std::cout << "WARNING: This example will move the robot! "
154 << "Please make sure to have the user stop button at hand!" << std::endl
155 << "Press Enter to continue..." << std::endl;
156 std::cin.ignore();
157
 // --- RealSense camera configuration: color + depth + infrared streams ---
158 vpRealSense2 rs;
159 rs2::config config;
160 unsigned int width = 640, height = 480, fps = 60;
161 config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
162 config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);
163 config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps);
164 rs.open(config);
165
166 // Warm up camera
 // NOTE(review): missing line 167 presumably declared the grabbed image,
 // e.g. vpImage<unsigned char> I(height, width) -- confirm.
168 for (size_t i = 0; i < 10; ++i) {
169 rs.acquire(I);
170 }
171
172 // Get camera intrinsics
 // NOTE(review): missing line 173 presumably declared vpCameraParameters cam -- confirm.
174 robot.getCameraParameters(cam, I);
175 std::cout << "cam:\n" << cam << std::endl;
176
177 std::shared_ptr<vpDisplay> d = vpDisplayFactory::createDisplay(I, 10, 10, "Current image");
178
 // --- AprilTag detector setup ---
 // NOTE(review): missing lines 179-180 presumably declared tagFamily
 // (likely vpDetectorAprilTag::TAG_36h11) and poseEstimationMethod -- confirm.
181 // vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::BEST_RESIDUAL_VIRTUAL_VS;
182 vpDetectorAprilTag detector(tagFamily);
183 detector.setAprilTagPoseEstimationMethod(poseEstimationMethod);
184 detector.setDisplayTag(display_tag);
185 detector.setAprilTagQuadDecimate(opt_quad_decimate);
186 detector.setZAlignedWithCameraAxis(true);
187
188 // Tag frame for pose estimation is the following
189 // - When using
190 // detector.setZAlignedWithCameraAxis(false);
191 // detector.detect();
192 // we consider the tag frame (o) such as z_o axis is not aligned with camera frame
193 // (meaning z_o axis is facing the camera)
194 //
195 // 3 y 2
196 // |
197 // o--x
198 //
199 // 0 1
200 //
201 // In that configuration, it is more difficult to set a desired camera pose c_M_o.
202 // To ease this step we can introduce an extra transformation matrix o'_M_o to align the axis
203 // with the camera frame:
204 //
205 // o'--x
206 // |
207 // y
208 //
209 // where
210 // | 1 0 0 0 |
211 // o'_M_o = | 0 -1 0 0 |
212 // | 0 0 -1 0 |
213 // | 0 0 0 1 |
214 //
215 // Defining the desired camera pose in frame o' then becomes easier.
216 //
217 // - When using rather
218 // detector.setZAlignedWithCameraAxis(true);
219 // detector.detect();
220 // we consider the tag frame (o) such as z_o axis is aligned with camera frame
221 //
222 // 3 2
223 //
224 // o--x
225 // |
226 // 0 y 1
227 //
228 // In that configuration, it is easier to define a desired camera pose c_M_o since all the axis
229 // (camera frame and tag frame are aligned)
230
231 // Servo
232 vpHomogeneousMatrix cd_M_c, c_M_o, o_M_o;
233
234 // Desired pose to reach
235 vpHomogeneousMatrix cd_M_o(vpTranslationVector(0, 0, opt_tagSize * 3.5), // 3.5 times tag width along camera z axis
 // NOTE(review): missing line 236 presumably held the rotation part of the
 // cd_M_o constructor call -- confirm.
237 if (!detector.isZAlignedWithCameraAxis()) {
 // Apply the o'_M_o flip described above so the desired pose is expressed
 // in the camera-aligned frame o'.
238 vpHomogeneousMatrix oprim_M_o = vpHomogeneousMatrix({ 1, 0, 0, 0,
239 0, -1, 0, 0,
240 0, 0, -1, 0,
241 0, 0, 0, 1 });
242 cd_M_o *= oprim_M_o;
243 }
244
245 cd_M_c = cd_M_o * c_M_o.inverse();
246
247 // Create current visual features
 // NOTE(review): missing lines 248-249 presumably declared the current
 // features s_t (vpFeatureTranslation) and s_tu (vpFeatureThetaU) -- confirm.
250 s_t.buildFrom(cd_M_c);
251 s_tu.buildFrom(cd_M_c);
252
253 // Create desired visual features
 // NOTE(review): missing lines 254-257 presumably declared the desired
 // features s_t_d / s_tu_d and the vpServo task; missing line 260
 // presumably called task.setServo(vpServo::EYEINHAND_CAMERA) -- confirm.
256
258 task.addFeature(s_t, s_t_d);
259 task.addFeature(s_tu, s_tu_d);
261 task.setInteractionMatrixType(vpServo::CURRENT);
262
263 // Set the gain
264 if (opt_adaptive_gain) {
265 vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30
266 task.setLambda(lambda);
267 }
268 else {
269 task.setLambda(0.5);
270 }
271
 // --- Optional real-time plotting of feature errors and camera velocities ---
272 vpPlot *plotter = nullptr;
273 int iter_plot = 0;
274
275 if (opt_plot) {
276 plotter = new vpPlot(2, static_cast<int>(250 * 2), 500, static_cast<int>(I.getWidth()) + 80, 10,
277 "Real time curves plotter");
278 plotter->setTitle(0, "Visual features error");
279 plotter->setTitle(1, "Camera velocities");
280 plotter->initGraph(0, 6);
281 plotter->initGraph(1, 6);
282 plotter->setLegend(0, 0, "error_feat_tx");
283 plotter->setLegend(0, 1, "error_feat_ty");
284 plotter->setLegend(0, 2, "error_feat_tz");
285 plotter->setLegend(0, 3, "error_feat_theta_ux");
286 plotter->setLegend(0, 4, "error_feat_theta_uy");
287 plotter->setLegend(0, 5, "error_feat_theta_uz");
288 plotter->setLegend(1, 0, "vc_x");
289 plotter->setLegend(1, 1, "vc_y");
290 plotter->setLegend(1, 2, "vc_z");
291 plotter->setLegend(1, 3, "wc_x");
292 plotter->setLegend(1, 4, "wc_y");
293 plotter->setLegend(1, 5, "wc_z");
294 }
295
296 bool final_quit = false;
297 bool has_converged = false;
298 bool send_velocities = false;
299 bool servo_started = false;
300 std::vector<vpImagePoint> *traj_vip = nullptr; // To memorize point trajectory
301
302 static double t_init_servo = vpTime::measureTimeMs();
303
304 robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
305
 // --- Servo loop: runs until convergence or user quits ---
306 while (!has_converged && !final_quit) {
307 double t_start = vpTime::measureTimeMs();
308
309 rs.acquire(I);
310
 // NOTE(review): missing line 311 presumably called vpDisplay::display(I) -- confirm.
312
313 std::vector<vpHomogeneousMatrix> c_M_o_vec;
314 detector.detect(I, opt_tagSize, cam, c_M_o_vec);
315
316 std::stringstream ss;
317 ss << "Left click to " << (send_velocities ? "stop the robot" : "servo the robot") << ", right click to quit.";
318 vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red);
319
320 vpColVector v_c(6);
321
322 // Only one tag is detected
323 if (c_M_o_vec.size() == 1) {
324 c_M_o = c_M_o_vec[0];
325
326 static bool first_time = true;
327 if (first_time) {
328 // Introduce security wrt tag positioning in order to avoid PI rotation
 // Pick, once, between the identity and a PI-rotated tag frame,
 // whichever yields the smaller rotation to perform.
329 std::vector<vpHomogeneousMatrix> v_o_M_o(2), v_cd_M_c(2);
330 v_o_M_o[1].buildFrom(0, 0, 0, 0, 0, M_PI);
331 for (size_t i = 0; i < 2; ++i) {
332 v_cd_M_c[i] = cd_M_o * v_o_M_o[i] * c_M_o.inverse();
333 }
334 if (std::fabs(v_cd_M_c[0].getThetaUVector().getTheta()) < std::fabs(v_cd_M_c[1].getThetaUVector().getTheta())) {
335 o_M_o = v_o_M_o[0];
336 }
337 else {
338 std::cout << "Desired frame modified to avoid PI rotation of the camera" << std::endl;
339 o_M_o = v_o_M_o[1]; // Introduce PI rotation
340 }
341 }
342
343 // Update current visual features
344 cd_M_c = cd_M_o * o_M_o * c_M_o.inverse();
345 s_t.buildFrom(cd_M_c);
346 s_tu.buildFrom(cd_M_c);
347
 // Task sequencing ramps the gain from t_init_servo, which is reset
 // until the operator actually enables velocity sending.
348 if (opt_task_sequencing) {
349 if (!servo_started) {
350 if (send_velocities) {
351 servo_started = true;
352 }
353 t_init_servo = vpTime::measureTimeMs();
354 }
355 v_c = task.computeControlLaw((vpTime::measureTimeMs() - t_init_servo) / 1000.);
356 }
357 else {
358 v_c = task.computeControlLaw();
359 }
360
361 // Display desired and current pose features
362 vpDisplay::displayFrame(I, cd_M_o * o_M_o, cam, opt_tagSize / 1.5, vpColor::yellow, 2);
363 vpDisplay::displayFrame(I, c_M_o, cam, opt_tagSize / 2, vpColor::none, 3);
364 // Get tag corners
365 std::vector<vpImagePoint> vip = detector.getPolygon(0);
366 // Get the tag cog corresponding to the projection of the tag frame in the image
367 vip.push_back(detector.getCog(0));
368 // Display the trajectory of the points
369 if (first_time) {
370 traj_vip = new std::vector<vpImagePoint>[vip.size()];
371 }
372 display_point_trajectory(I, vip, traj_vip);
373
374 if (opt_plot) {
375 plotter->plot(0, iter_plot, task.getError());
376 plotter->plot(1, iter_plot, v_c);
377 iter_plot++;
378 }
379
380 if (opt_verbose) {
381 std::cout << "v_c: " << v_c.t() << std::endl;
382 }
383
 // Convergence test on translation [m] and rotation [deg] errors.
 // NOTE(review): missing line 384 presumably declared
 // vpTranslationVector cd_t_c = cd_M_c.getTranslationVector() -- confirm.
385 vpThetaUVector cd_tu_c = cd_M_c.getThetaUVector();
386 double error_tr = sqrt(cd_t_c.sumSquare());
387 double error_tu = vpMath::deg(sqrt(cd_tu_c.sumSquare()));
388
389 ss.str("");
390 ss << "error_t: " << error_tr;
391 vpDisplay::displayText(I, 20, static_cast<int>(I.getWidth()) - 150, ss.str(), vpColor::red);
392 ss.str("");
393 ss << "error_tu: " << error_tu;
394 vpDisplay::displayText(I, 40, static_cast<int>(I.getWidth()) - 150, ss.str(), vpColor::red);
395
396 if (opt_verbose)
397 std::cout << "error translation: " << error_tr << " ; error rotation: " << error_tu << std::endl;
398
399 if ((error_tr < opt_convergence_threshold_t) && (error_tu < opt_convergence_threshold_tu)) {
400 has_converged = true;
401 std::cout << "Servo task has converged" << std::endl;
402 ;
403 vpDisplay::displayText(I, 100, 20, "Servo task has converged", vpColor::red);
404 }
405
406 if (first_time) {
407 first_time = false;
408 }
409 } // end if (c_M_o_vec.size() == 1)
410 else {
 // No (or multiple) tag detected: command a zero velocity.
411 v_c = 0;
412 }
413
 // Safety: until the operator left-clicks, keep the robot still.
414 if (!send_velocities) {
415 v_c = 0;
416 }
417
418 // Send to the robot
419 robot.setVelocity(vpRobot::CAMERA_FRAME, v_c);
420
421 ss.str("");
422 ss << "Loop time: " << vpTime::measureTimeMs() - t_start << " ms";
423 vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red);
 // NOTE(review): missing lines 424/426 presumably called vpDisplay::flush(I)
 // and declared the vpMouseButton::vpMouseButtonType button variable -- confirm.
425
427 if (vpDisplay::getClick(I, button, false)) {
428 switch (button) {
 // NOTE(review): missing lines 429/433 presumably held the case labels
 // (left button toggles velocity sending, right button quits) -- confirm.
430 send_velocities = !send_velocities;
431 break;
432
434 final_quit = true;
435 break;
436
437 default:
438 break;
439 }
440 }
441 }
442 std::cout << "Stop the robot " << std::endl;
443 robot.setRobotState(vpRobot::STATE_STOP);
444
445 if (opt_plot && plotter != nullptr) {
446 delete plotter;
447 plotter = nullptr;
448 }
449
 // After convergence, keep displaying images until the user clicks.
450 if (!final_quit) {
451 while (!final_quit) {
452 rs.acquire(I);
 // NOTE(review): missing line 453 presumably called vpDisplay::display(I) -- confirm.
454
455 vpDisplay::displayText(I, 20, 20, "Click to quit the program.", vpColor::red);
456 vpDisplay::displayText(I, 40, 20, "Visual servo converged.", vpColor::red);
457
458 if (vpDisplay::getClick(I, false)) {
459 final_quit = true;
460 }
 // NOTE(review): missing line 462 presumably called vpDisplay::flush(I) -- confirm.
461
463 }
464 }
465
466 if (traj_vip) {
467 delete[] traj_vip;
468 }
469 }
470 catch (const vpException &e) {
 // On any ViSP error, make sure the robot is stopped before exiting.
471 std::cout << "ViSP exception: " << e.what() << std::endl;
472 std::cout << "Stop the robot " << std::endl;
473 robot.setRobotState(vpRobot::STATE_STOP);
474 return EXIT_FAILURE;
475 }
476
477 return EXIT_SUCCESS;
478}
479#else
// Fallback entry point when the example cannot be built.
// The guard above requires REALSENSE2 && DISPLAY && AFMA6; report every
// missing dependency (the original omitted the display diagnostic) and
// exit successfully so CI treats the example as skipped, not failed.
int main()
{
#if !defined(VISP_HAVE_REALSENSE2)
  std::cout << "Install librealsense-2.x" << std::endl;
#endif
#if !defined(VISP_HAVE_DISPLAY)
  std::cout << "ViSP is not built with display capabilities..." << std::endl;
#endif
#if !defined(VISP_HAVE_AFMA6)
  std::cout << "ViSP is not built with Afma-6 robot support..." << std::endl;
#endif
  return EXIT_SUCCESS;
}
490#endif
Adaptive gain computation.
@ TOOL_INTEL_D435_CAMERA
Definition vpAfma6.h:129
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithDistortion
Perspective projection with distortion model.
Implementation of column vector and the associated operations.
static const vpColor red
Definition vpColor.h:198
static const vpColor none
Definition vpColor.h:210
static const vpColor yellow
Definition vpColor.h:206
static const vpColor green
Definition vpColor.h:201
@ TAG_36h11
AprilTag 36h11 pattern (recommended).
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Definition vpException.h:60
Class that defines a 3D visual feature from a axis/angle parametrization that represent the rotati...
Class that defines the translation visual feature .
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpThetaUVector getThetaUVector() const
vpHomogeneousMatrix & buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R)
vpHomogeneousMatrix inverse() const
vpTranslationVector getTranslationVector() const
static double distance(const vpImagePoint &iP1, const vpImagePoint &iP2)
Definition of the vpImage class member functions.
Definition vpImage.h:131
static double rad(double deg)
Definition vpMath.h:129
static double deg(double rad)
Definition vpMath.h:119
This class enables real time drawing of 2D or 3D graphics. An instance of the class open a window whi...
Definition vpPlot.h:117
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
Control of Irisa's gantry robot named Afma6.
@ CAMERA_FRAME
Definition vpRobot.h:81
@ STATE_VELOCITY_CONTROL
Initialize the velocity controller.
Definition vpRobot.h:64
@ STATE_STOP
Stops robot motion especially in velocity and acceleration control.
Definition vpRobot.h:63
double sumSquare() const
@ EYEINHAND_CAMERA
Definition vpServo.h:176
@ CURRENT
Definition vpServo.h:217
Implementation of a rotation vector as axis-angle minimal representation.
Class that consider the case of a translation vector.
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT double measureTimeMs()