Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
AROgreBasic.cpp
1/*
2 * ViSP, open source Visual Servoing Platform software.
3 * Copyright (C) 2005 - 2025 by Inria. All rights reserved.
4 *
5 * This software is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 * See the file LICENSE.txt at the root directory of this source
10 * distribution for additional information about the GNU GPL.
11 *
12 * For using ViSP with software that can not be combined with the GNU
13 * GPL, please contact Inria about acquiring a ViSP Professional
14 * Edition License.
15 *
16 * See https://visp.inria.fr for more information.
17 *
18 * This software was developed at:
19 * Inria Rennes - Bretagne Atlantique
20 * Campus Universitaire de Beaulieu
21 * 35042 Rennes Cedex
22 * France
23 *
24 * If you have questions regarding the use of this file, please contact
25 * Inria at visp@inria.fr
26 *
27 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Description:
31 * Implementation of a simple augmented reality application using the vpAROgre
32 * class.
33 */
34
39
40#include <iostream>
41#include <visp3/core/vpConfig.h>
42
43#if defined(VISP_HAVE_OGRE) && defined(VISP_HAVE_DISPLAY)
44
#include <visp3/ar/vpAROgre.h>
#include <visp3/blob/vpDot2.h>
#include <visp3/core/vpDebug.h>
#include <visp3/core/vpImageConvert.h>
#include <visp3/core/vpImagePoint.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpPixelMeterConversion.h>
#include <visp3/core/vpPoint.h>
#include <visp3/gui/vpDisplayFactory.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/vision/vpPose.h>
56
57// List of allowed command line options
58#define GETOPTARGS "cdi:p:h"
59
60#ifdef ENABLE_VISP_NAMESPACE
61using namespace VISP_NAMESPACE_NAME;
62#endif
63
/*!
  Print the program options.

  \param name : Program name.
  \param badparam : Bad parameter name, or nullptr if none.
  \param ipath : Default input image path.
  \param ppath : Default personal image path.
*/
void usage(const char *name, const char *badparam, const std::string &ipath, const std::string &ppath)
{
  // Select the image extension matching the dataset version the example was built against
#if defined(VISP_HAVE_DATASET)
#if VISP_HAVE_DATASET_VERSION >= 0x030600
  std::string ext("png");
#else
  std::string ext("pgm");
#endif
#else
  // We suppose that the user will download a recent dataset
  std::string ext("png");
#endif

  fprintf(stdout, "\n\
Test augmented reality using the vpAROgre class.\n\
\n\
SYNOPSIS\n\
  %s [-i <test image path>] [-p <personal image path>]\n\
     [-c] [-h]\n", name);

  fprintf(stdout, "\n\
OPTIONS:                                               Default\n\
  -i <input image path>                                %s\n\
     Set image input path.\n\
     From this path read images \n\
     \"mire-2/image.%%04d.%s\". These \n\
     images come from ViSP-images-x.y.z.tar.gz available \n\
     on the ViSP website.\n\
     Setting the VISP_INPUT_IMAGE_PATH environment\n\
     variable produces the same behaviour than using\n\
     this option.\n\
 \n\
  -p <personal image path>                             %s\n\
     Specify a personal sequence containing images \n\
     to process.\n\
     By image sequence, we mean one file per image.\n\
     Example : \"/Temp/visp-images/cube/image.%%04d.%s\"\n\
     %%04d is for the image numbering.\n\
\n\
  -c\n\
     Disable the mouse click. Useful to automate the \n\
     execution of this program without human intervention.\n\
\n\
  -d\n\
     Disable the display.\n\
\n\
  -h\n\
     Print the help.\n",
          ipath.c_str(), ext.c_str(), ppath.c_str(), ext.c_str());

  // Report the offending parameter, if any
  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}
127
140bool getOptions(int argc, const char **argv, std::string &ipath, std::string &ppath, bool &click_allowed, bool &use_display)
141{
142 const char *optarg_;
143 int c;
144 while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {
145
146 switch (c) {
147 case 'c':
148 click_allowed = false;
149 break;
150 case 'd':
151 use_display = false;
152 break;
153 case 'i':
154 ipath = optarg_;
155 break;
156 case 'p':
157 ppath = optarg_;
158 break;
159 case 'h':
160 usage(argv[0], nullptr, ipath, ppath);
161 return false;
162
163 default:
164 usage(argv[0], optarg_, ipath, ppath);
165 return false;
166 }
167 }
168
169 if ((c == 1) || (c == -1)) {
170 // standalone param or error
171 usage(argv[0], nullptr, ipath, ppath);
172 std::cerr << "ERROR: " << std::endl;
173 std::cerr << " Bad argument " << optarg_ << std::endl << std::endl;
174 return false;
175 }
176
177 return true;
178}
179
186void computeInitialPose(vpCameraParameters *mcam, vpImage<unsigned char> &I, vpPose *mPose, vpDot2 *md,
187 vpImagePoint *mcog, vpHomogeneousMatrix *cMo, vpPoint *mP, const bool &opt_click_allowed, bool opt_display)
188{
189 vpDisplay *display = nullptr;
190 if (opt_display) {
191#if defined(VISP_HAVE_DISPLAY)
193#else
194 opt_display = false; // No display is available
195#endif
196 }
197
198 for (unsigned int i = 0; i < 4; ++i) {
199 if (opt_display) {
200 md[i].setGraphics(true);
201 }
202 else {
203 md[i].setGraphics(false);
204 }
205 }
206
207 if (opt_display) {
208 // Display size is automatically defined by the image (I) size
209 display->init(I, 100, 100, "Preliminary Pose Calculation");
210 // display the image
211 // The image class has a member that specify a pointer toward
212 // the display that has been initialized in the display declaration
213 // therefore is is no longer necessary to make a reference to the
214 // display variable.
216 // Flush the display
218 }
219
220 std::cout << "**"<< std::endl;
221 std::cout << "** Preliminary Pose Calculation" << std::endl;
222 std::cout << "** Click on the 4 dots" << std::endl;
223 std::cout << "** Dot1: (-x,-y,0), Dot2: (x,-y,0), Dot3: (x,y,0), Dot4: (-x,y,0)" << std::endl;
224 std::cout << "**" << std::endl;
225
226 vpImagePoint ip[4];
227 if (!opt_click_allowed) {
228 ip[0].set_i(265);
229 ip[0].set_j(93);
230 ip[1].set_i(248);
231 ip[1].set_j(242);
232 ip[2].set_i(166);
233 ip[2].set_j(215);
234 ip[3].set_i(178);
235 ip[3].set_j(85);
236 }
237
238 for (unsigned int i = 0; i < 4; ++i) {
239 // by using setGraphics, we request to see the edges of the dot
240 // in red on the screen.
241 // It uses the overlay image plane.
242 // The default of this setting is that it is time consuming
243
244 md[i].setGraphics(true);
245 md[i].setGrayLevelPrecision(0.7);
246 md[i].setSizePrecision(0.5);
247
248 for (unsigned int j = 0; j < i; j++)
249 md[j].display(I);
250
251 // flush the display buffer
252 if (opt_display) {
254 }
255 try {
256 if (opt_click_allowed && opt_display) {
257 md[i].initTracking(I);
258 // std::cout << "click " << i << " " << md[i] << std::endl;
259 }
260 else {
261 md[i].initTracking(I, ip[i]);
262 }
263 }
264 catch (...) {
265 }
266
267 mcog[i] = md[i].getCog();
268 // an exception is thrown by the track method if
269 // - dot is lost
270 // - the number of pixel is too small
271 // - too many pixels are detected (this is usual when a "big"
272 // specularity
273 // occurs. The threshold can be modified using the
274 // setNbMaxPoint(int) method
275 if (opt_display) {
276 md[i].display(I);
277 // flush the display buffer
279 }
280 }
281
282 if (opt_display) {
283 // display a red cross (size 10) in the image at the dot center
284 // of gravity location
285 //
286 // WARNING
287 // in the vpDisplay class member's when pixel coordinates
288 // are considered the first element is the row index and the second
289 // is the column index:
290 // vpDisplay::displayCross(Image, row index, column index, size, color)
291 // therefore u and v are inverted wrt to the vpDot specification
292 // Alternatively, to avoid this problem another set of member have
293 // been defined in the vpDisplay class.
294 // If the method name is postfixe with _uv the specification is :
295 // vpDisplay::displayCross_uv(Image, column index, row index, size,
296 // color)
297
298 for (unsigned int i = 0; i < 4; ++i) {
299 vpDisplay::displayCross(I, mcog[i], 10, vpColor::red);
300 }
301
302 // flush the X11 buffer
304 }
305
306 // --------------------------------------------------------
307 // Now we will compute the pose
308 // --------------------------------------------------------
309
310 // the list of point is cleared (if that's not done before)
311 mPose->clearPoint();
312
313 // we set the 3D points coordinates (in meter !) in the object/world frame
314 double l = 0.06;
315 double L = 0.07;
316 mP[0].setWorldCoordinates(-L, -l, 0); // (X,Y,Z)
317 mP[1].setWorldCoordinates(L, -l, 0);
318 mP[2].setWorldCoordinates(L, l, 0);
319 mP[3].setWorldCoordinates(-L, l, 0);
320
321 // pixel-> meter conversion
322 for (unsigned int i = 0; i < 4; ++i) {
323 // u[i]. v[i] are expressed in pixel
324 // conversion in meter is achieved using
325 // x = (u-u0)/px
326 // y = (v-v0)/py
327 // where px, py, u0, v0 are the intrinsic camera parameters
328 double x = 0, y = 0;
329 vpPixelMeterConversion::convertPoint(*mcam, mcog[i], x, y);
330 mP[i].set_x(x);
331 mP[i].set_y(y);
332 }
333
334 // The pose structure is build, we put in the point list the set of point
335 // here both 2D and 3D world coordinates are known
336 for (unsigned int i = 0; i < 4; ++i) {
337 mPose->addPoint(mP[i]); // and added to the pose computation point list
338 }
339
340 // compute the initial pose using Dementhon method followed by a non linear
341 // minimization method
342
343 // Compute initial pose
345
346 // Display briefly just to get an overview of the ViSP pose
347 if (opt_display) {
348 // Display the computed pose
349 mPose->display(I, *cMo, *mcam, 0.05, vpColor::red);
351 vpTime::wait(1000);
352 }
353
354 if (opt_display && display != nullptr) {
355 delete display;
356 }
357}
358
359int main(int argc, const char **argv)
360{
361#if defined(VISP_HAVE_DATASET)
362#if VISP_HAVE_DATASET_VERSION >= 0x030600
363 std::string ext("png");
364#else
365 std::string ext("pgm");
366#endif
367#else
368 // We suppose that the user will download a recent dataset
369 std::string ext("png");
370#endif
371
372 try {
373 std::string env_ipath;
374 std::string opt_ipath;
375 std::string ipath;
376 std::string opt_ppath;
377 std::string dirname;
378 std::string filename;
379 bool opt_click_allowed = true;
380 bool opt_display = true;
381
382 // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
383 // environment variable value
385
386 // Set the default input path
387 if (!env_ipath.empty())
388 ipath = env_ipath;
389
390 // Read the command line options
391 if (getOptions(argc, argv, opt_ipath, opt_ppath, opt_click_allowed, opt_display) == false) {
392 return EXIT_FAILURE;
393 }
394
395 // Get the option values
396 if (!opt_ipath.empty())
397 ipath = opt_ipath;
398
399 // Compare ipath and env_ipath. If they differ, we take into account
400 // the input path coming from the command line option
401 if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) {
402 if (ipath != env_ipath) {
403 std::cout << std::endl << "WARNING: " << std::endl;
404 std::cout << " Since -i <visp image path=" << ipath << "> "
405 << " is different from VISP_IMAGE_PATH=" << env_ipath << std::endl
406 << " we skip the environment variable." << std::endl;
407 }
408 }
409
410 // Test if an input path is set
411 if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) {
412 usage(argv[0], nullptr, ipath, opt_ppath);
413 std::cerr << std::endl << "ERROR:" << std::endl;
414 std::cerr << " Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH " << std::endl
415 << " environment variable to specify the location of the " << std::endl
416 << " image path where test images are located." << std::endl
417 << " Use -p <personal image path> option if you want to " << std::endl
418 << " use personal images." << std::endl
419 << std::endl;
420
421 return EXIT_FAILURE;
422 }
423
424 if (!opt_display && opt_click_allowed) {
425 std::cerr << std::endl << "ERROR:" << std::endl;
426 std::cerr << " Display is disabled but clicks are required !" << std::endl;
427 return EXIT_FAILURE;
428 }
429
430 std::ostringstream s;
431
432 if (opt_ppath.empty()) {
433 // Set the path location of the image sequence
434 dirname = vpIoTools::createFilePath(ipath, "mire-2");
435
436 // Build the name of the image file
437
438 s.setf(std::ios::right, std::ios::adjustfield);
439 s << "image.%04d.";
440 s << ext;
441 filename = vpIoTools::createFilePath(dirname, s.str());
442 }
443 else {
444 filename = opt_ppath;
445 }
446
447 // We will read a sequence of images
448 vpVideoReader grabber;
449 grabber.setFirstFrameIndex(1);
450 grabber.setFileName(filename.c_str());
451 // Grey level image associated to a display in the initial pose
452 // computation
453 vpImage<unsigned char> Idisplay;
454 // Grey level image to track points
456 // RGBa image to get background
458 // Matrix representing camera parameters
460
461 // Variables used for pose computation purposes
462 vpPose mPose;
463 vpDot2 md[4];
464 vpImagePoint mcog[4];
465 vpPoint mP[4];
466
467 // CameraParameters we got from calibration
468 // Keep u0 and v0 as center of the screen
470
471 try {
472 std::cout << "Load: " << filename << std::endl;
473 grabber.open(Idisplay);
474 grabber.acquire(Idisplay);
475 vpCameraParameters mcamTmp(592, 570, grabber.getWidth() / 2, grabber.getHeight() / 2);
476 // Compute the initial pose of the camera
477 computeInitialPose(&mcamTmp, Idisplay, &mPose, md, mcog, &cMo, mP, opt_click_allowed, opt_display);
478 // Close the framegrabber
479 grabber.close();
480
481 // Associate the grabber to the RGBa image
482 grabber.open(IC);
483 mcam.init(mcamTmp);
484 }
485 catch (...) {
486 std::cerr << std::endl << "ERROR:" << std::endl;
487 std::cerr << " Cannot read " << filename << std::endl;
488 std::cerr << " Check your -i " << ipath << " option " << std::endl
489 << " or VISP_INPUT_IMAGE_PATH environment variable." << std::endl;
490 return EXIT_FAILURE;
491 }
492
493 // Create a vpAROgre object with color background
494 vpAROgre ogre(mcam, grabber.getWidth(), grabber.getHeight());
495 // Initialize it
496 ogre.setShowConfigDialog(opt_display);
497 ogre.init(IC, false, !opt_display);
498 ogre.load("Robot", "robot.mesh");
499 ogre.setScale("Robot", 0.001f, 0.001f, 0.001f);
500 ogre.setRotation("Robot", vpRotationMatrix(vpRxyzVector(M_PI / 2, -M_PI / 2, 0)));
501
502 // Add an optional point light source
503 ogre.getSceneManager()->setAmbientLight(Ogre::ColourValue(static_cast<float>(0.6), static_cast<float>(0.6), static_cast<float>(0.6))); // Default value of lightning
504 Ogre::Light *light = ogre.getSceneManager()->createLight();
505 light->setDiffuseColour(1.0, 1.0, 1.0); // scaled RGB values
506 light->setSpecularColour(1.0, 1.0, 1.0); // scaled RGB values
507 // Lumiere ponctuelle
508#if (VISP_HAVE_OGRE_VERSION < (1 << 16 | 10 << 8 | 0))
509 light->setPosition(-5, -5, 10);
510#else
511 Ogre::SceneNode *spotLightNode = ogre.getSceneManager()->getRootSceneNode()->createChildSceneNode();
512 spotLightNode->attachObject(light);
513 spotLightNode->setPosition(Ogre::Vector3(-5, -5, 10));
514#endif
515 light->setType(Ogre::Light::LT_POINT);
516 light->setAttenuation((Ogre::Real)100, (Ogre::Real)1.0, (Ogre::Real)0.045, (Ogre::Real)0.0075);
517 // Ombres
518 light->setCastShadows(true);
519
520 // Rendering loop
521 while (ogre.continueRendering() && !grabber.end()) {
522 // Acquire a frame
523 grabber.acquire(IC);
524
525 // Convert it to a grey level image for tracking purpose
527
528 // kill the point list
529 mPose.clearPoint();
530
531 // track the dot
532 for (int i = 0; i < 4; ++i) {
533 // track the point
534 md[i].track(I, mcog[i]);
535 md[i].setGrayLevelPrecision(0.90);
536 // pixel->meter conversion
537 {
538 double x = 0, y = 0;
539 vpPixelMeterConversion::convertPoint(mcam, mcog[i], x, y);
540 mP[i].set_x(x);
541 mP[i].set_y(y);
542 }
543
544 // and added to the pose computation point list
545 mPose.addPoint(mP[i]);
546 }
547 // the pose structure has been updated
548
549 // the pose is now updated using the virtual visual servoing approach
550 // Dementhon or lagrange is no longer necessary, pose at the
551 // previous iteration is sufficient
553
554 // Display with ogre
555 if (opt_display) {
556 ogre.display(IC, cMo);
557 }
558
559 // Wait so that the video does not go too fast
560 vpTime::wait(15);
561 }
562 // Close the grabber
563 grabber.close();
564 return EXIT_SUCCESS;
565 }
566 catch (const vpException &e) {
567 std::cout << "Catch a ViSP exception: " << e << std::endl;
568 return EXIT_FAILURE;
569 }
570 catch (Ogre::Exception &e) {
571 std::cout << "Catch an Ogre exception: " << e.getDescription() << std::endl;
572 return EXIT_FAILURE;
573 }
574 catch (...) {
575 std::cout << "Catch an exception " << std::endl;
576 return EXIT_FAILURE;
577 }
578}
579#else // VISP_HAVE_OGRE && VISP_HAVE_DISPLAY
// Fallback entry point built when Ogre3D or a display library is missing:
// explain what is missing and how to enable this example, then exit cleanly.
int main()
{
#if (!(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GTK) || defined(VISP_HAVE_GDI)))
  std::cout << "You do not have X11, or GTK, or GDI (Graphical Device Interface) functionalities to display images..."
            << std::endl
            << "Tip if you are on a unix-like system:" << std::endl
            << "- Install X11, configure again ViSP using cmake and build again this example" << std::endl
            << "Tip if you are on a windows-like system:" << std::endl
            << "- Install GDI, configure again ViSP using cmake and build again this example" << std::endl;
#else
  std::cout << "You do not have Ogre functionalities" << std::endl
            << "Tip:" << std::endl
            << "- Install Ogre3D, configure again ViSP using cmake and build again this example" << std::endl;
#endif
  return EXIT_SUCCESS;
}
596#endif
Implementation of an augmented reality viewer using Ogre3D 3rd party.
Definition vpAROgre.h:121
Generic class defining intrinsic camera parameters.
void init()
Basic initialization with the default parameters.
static const vpColor red
Definition vpColor.h:198
Class that defines generic functionalities for display.
Definition vpDisplay.h:171
static void display(const vpImage< unsigned char > &I)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void flush(const vpImage< unsigned char > &I)
This tracker is meant to track a blob (connected pixels with the same gray level) on a vpImage.
Definition vpDot2.h:127
void track(const vpImage< unsigned char > &I, bool canMakeTheWindowGrow=true)
Definition vpDot2.cpp:441
void setGraphics(bool activate)
Definition vpDot2.h:320
void display(const vpImage< unsigned char > &I, vpColor color=vpColor::red, unsigned int thickness=1) const
Definition vpDot2.cpp:219
void setSizePrecision(const double &sizePrecision)
Definition vpDot2.cpp:745
void setGrayLevelPrecision(const double &grayLevelPrecision)
Definition vpDot2.cpp:715
vpImagePoint getCog() const
Definition vpDot2.h:183
void initTracking(const vpImage< unsigned char > &I, unsigned int size=0)
Definition vpDot2.cpp:263
error that can be emitted by ViSP classes.
Definition vpException.h:60
unsigned int getWidth() const
Return the number of columns in the image.
unsigned int getHeight() const
Return the number of rows in the image.
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
void set_j(double jj)
void set_i(double ii)
Definition of the vpImage class member functions.
Definition vpImage.h:131
static std::string getViSPImagesDataPath()
static std::string createFilePath(const std::string &parent, const std::string &child)
static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, int flags)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
Definition vpPoint.h:79
void set_x(double x)
Set the point x coordinate in the image plane.
Definition vpPoint.cpp:471
void setWorldCoordinates(double oX, double oY, double oZ)
Definition vpPoint.cpp:116
void set_y(double y)
Set the point y coordinate in the image plane.
Definition vpPoint.cpp:473
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
Definition vpPose.h:82
void addPoint(const vpPoint &P)
Definition vpPose.cpp:96
@ DEMENTHON_LAGRANGE_VIRTUAL_VS
Definition vpPose.h:103
@ VIRTUAL_VS
Definition vpPose.h:97
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, FuncCheckValidityPose func=nullptr)
Definition vpPose.cpp:385
void clearPoint()
Definition vpPose.cpp:89
static void display(vpImage< unsigned char > &I, vpHomogeneousMatrix &cMo, vpCameraParameters &cam, double size, vpColor col=vpColor::none)
Definition vpPose.cpp:567
Implementation of a rotation matrix and operations on such kind of matrices.
Implementation of a rotation vector as Euler angle minimal representation.
Class that enables to manipulate easily a video file or a sequence of images. As it inherits from the...
void open(vpImage< vpRGBa > &I) VP_OVERRIDE
void setFileName(const std::string &filename)
void setFirstFrameIndex(const long first_frame)
void close() VP_OVERRIDE
void acquire(vpImage< vpRGBa > &I) VP_OVERRIDE
vpDisplay * allocateDisplay()
Return a newly allocated vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT int wait(double t0, double t)