Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
vpRBTracker.cpp
1/*
2 * ViSP, open source Visual Servoing Platform software.
3 * Copyright (C) 2005 - 2025 by Inria. All rights reserved.
4 *
5 * This software is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 * See the file LICENSE.txt at the root directory of this source
10 * distribution for additional information about the GNU GPL.
11 *
12 * For using ViSP with software that can not be combined with the GNU
13 * GPL, please contact Inria about acquiring a ViSP Professional
14 * Edition License.
15 *
16 * See https://visp.inria.fr for more information.
17 *
18 * This software was developed at:
19 * Inria Rennes - Bretagne Atlantique
20 * Campus Universitaire de Beaulieu
21 * 35042 Rennes Cedex
22 * France
23 *
24 * If you have questions regarding the use of this file, please contact
25 * Inria at visp@inria.fr
26 *
27 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29 */
30
31#include <visp3/rbt/vpRBTracker.h>
32
33#if defined(VISP_HAVE_NLOHMANN_JSON)
34#include VISP_NLOHMANN_JSON(json.hpp)
35#endif
36
37#include <visp3/core/vpExponentialMap.h>
38#include <visp3/core/vpIoTools.h>
39
40#include <visp3/ar/vpPanda3DRendererSet.h>
41#include <visp3/ar/vpPanda3DGeometryRenderer.h>
42#include <visp3/ar/vpPanda3DRGBRenderer.h>
43#include <visp3/ar/vpPanda3DFrameworkManager.h>
44
45#include <visp3/rbt/vpRBFeatureTrackerFactory.h>
46#include <visp3/rbt/vpRBDriftDetectorFactory.h>
47#include <visp3/rbt/vpObjectMaskFactory.h>
48#include <visp3/rbt/vpRBVisualOdometry.h>
49#include <visp3/rbt/vpRBInitializationHelper.h>
50
52
57{
58 m_rendererSettings.setClippingDistance(0.01, 1.0);
59 m_renderer.setRenderParameters(m_rendererSettings);
60
61 m_driftDetector = nullptr;
62 m_mask = nullptr;
63 m_odometry = nullptr;
64}
65
67{
68 cMo = m_cMo;
69}
70
72{
73 m_cMo = cMo;
74 m_cMoPrev = cMo;
75 m_renderer.setCameraPose(cMo.inverse());
76}
77
79{
80 vpMatrix sumInvs(6, 6, 0.0);
81 double sumWeights = 0.0;
82 for (const std::shared_ptr<vpRBFeatureTracker> &tracker: m_trackers) {
83 if (tracker->getNumFeatures() == 0) {
84 continue;
85 }
86 tracker->updateCovariance(m_lambda);
87 vpMatrix trackerCov = tracker->getCovariance();
88 double trackerWeight = tracker->getVVSTrackerWeight(1.0);
89 if (trackerCov.getRows() != 6 || trackerCov.getCols() != 6) {
91 "Expected tracker pose covariance to have dimensions 6x6, but got %dx%d",
92 trackerCov.getRows(), trackerCov.getCols());
93 }
94
95 sumInvs += (trackerWeight * trackerCov.pseudoInverse());
96 sumWeights += trackerWeight;
97 }
98 return sumWeights * sumInvs.pseudoInverse();
99}
100
102
103void vpRBTracker::setCameraParameters(const vpCameraParameters &cam, unsigned h, unsigned w)
104{
107 "Camera model cannot have distortion. Undistort images before tracking and use the undistorted camera model");
108 }
109 if (h == 0 || w == 0) {
110 throw vpException(
112 "Image dimensions must be greater than 0"
113 );
114 }
115 m_cam = cam;
116 m_imageHeight = h;
117 m_imageWidth = w;
118 m_rendererSettings.setCameraIntrinsics(m_cam);
119 m_rendererSettings.setImageResolution(m_imageHeight, m_imageWidth);
120 m_renderer.setRenderParameters(m_rendererSettings);
121}
122
127
129{
132 m_firstIteration = true;
134 for (std::shared_ptr<vpRBFeatureTracker> &tracker: m_trackers) {
135 tracker->reset();
136 }
137 if (m_odometry) {
138 m_odometry->reset();
139 }
140 if (m_mask) {
141 m_mask->reset();
142 }
143 if (m_driftDetector) {
144 m_driftDetector->reset();
145 }
146}
147
148void vpRBTracker::setModelPath(const std::string &path)
149{
150 m_modelPath = path;
151 m_modelChanged = true;
152}
153
154void vpRBTracker::setupRenderer(const std::string &file)
155{
156 if (!m_rendererIsSetup) {
157 m_renderer.setRenderParameters(m_rendererSettings);
158 const std::shared_ptr<vpPanda3DGeometryRenderer> geometryRenderer = std::make_shared<vpPanda3DGeometryRenderer>(
160 m_renderer.addSubRenderer(geometryRenderer);
161 }
162 if (!vpIoTools::checkFilename(file)) {
163 throw vpException(vpException::badValue, "3D model file %s could not be found", file.c_str());
164 }
165
166
167 // Add silhouette extractor if required
168 bool requiresSilhouetteShader = false;
169 for (std::shared_ptr<vpRBFeatureTracker> &tracker: m_trackers) {
170 if (tracker->requiresSilhouetteCandidates()) {
171 requiresSilhouetteShader = true;
172 break;
173 }
174 }
175 if (requiresSilhouetteShader && m_renderer.getRenderer<vpPanda3DDepthCannyFilter>() == nullptr) {
176 static int cannyId = 0;
177
178 m_renderer.addSubRenderer(std::make_shared<vpPanda3DDepthCannyFilter>(
179 "depthCanny" + std::to_string(cannyId), m_renderer.getRenderer<vpPanda3DGeometryRenderer>(), true, 0.0));
180 ++cannyId;
181 }
182 if (!m_rendererIsSetup) {
183 m_renderer.initFramework();
184 }
185 else {
187 }
188 if (m_modelChanged) {
189 m_renderer.clearScene();
190 m_renderer.addLight(vpPanda3DAmbientLight("ambient", vpRGBf(0.4f)));
191 m_renderer.addNodeToScene(m_renderer.loadObject("object", file));
192 m_renderer.setFocusedObject("object");
193 m_modelChanged = false;
194 }
195 m_rendererIsSetup = true;
196}
197
199{
200 for (std::shared_ptr<vpRBFeatureTracker> tracker : m_trackers) {
201 if (tracker->requiresDepth() || tracker->requiresRGB()) {
202 throw vpException(vpException::badValue, "Some tracked features require RGB or depth features");
203 }
204 }
205 checkDimensionsOrThrow(I, "grayscale");
206 vpRBFeatureTrackerInput frameInput;
207 frameInput.I = I;
208 frameInput.cam = m_cam;
209 return track(frameInput);
210}
211
213{
214 for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
215 if (tracker->requiresDepth()) {
216 throw vpException(vpException::badValue, "Some tracked features require depth features");
217 }
218 }
219 checkDimensionsOrThrow(I, "grayscale");
220 checkDimensionsOrThrow(IRGB, "color");
221 vpRBFeatureTrackerInput frameInput;
222 frameInput.I = I;
223 frameInput.IRGB = IRGB;
224 frameInput.cam = m_cam;
225 return track(frameInput);
226}
227
229{
232 m_convergenceMetric->sampleObject(m_renderer);
233 }
235 vpHomogeneousMatrix c(0, 0, 0.5, 0, 0, 0);
236 updateRender(f, c);
237}
238
240{
241 checkDimensionsOrThrow(I, "grayscale");
242 checkDimensionsOrThrow(IRGB, "color");
243 checkDimensionsOrThrow(depth, "depth");
244 vpRBFeatureTrackerInput frameInput;
245 frameInput.I = I;
246 frameInput.IRGB = IRGB;
247 frameInput.depth = depth;
248 frameInput.cam = m_cam;
249 return track(frameInput);
250}
251
253{
254 vpRBTrackingResult result;
255 vpRBTrackingTimings &timer = result.timer();
256 timer.reset();
257
258 // Render the object at the current pose
259 timer.startTimer();
260 if (m_firstIteration || !m_convergenceMetric || m_convergenceMetric->shouldUpdateRender(m_cam, m_cMo, m_currentFrame.renders.cMo)) {
261 updateRender(input);
262 }
263 else {
264 input.renders = m_currentFrame.renders;
265 }
266 timer.setRenderTime(timer.endTimer());
267
268 if (m_firstIteration) {
269 m_firstIteration = false;
270 m_previousFrame.I = input.I;
271 m_previousFrame.IRGB = input.IRGB;
272 }
273
274
275 // Compute the object segmentation mask that will be used by trackers to select features
276 timer.startTimer();
277 if (m_mask) {
278 m_mask->updateMask(input, m_previousFrame, input.mask);
279 }
280 timer.setMaskTime(timer.endTimer());
281
282 // Extract silhouette contours
283 bool requiresSilhouetteCandidates = false;
284 for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
285 if (tracker->requiresSilhouetteCandidates()) {
286 requiresSilhouetteCandidates = true;
287 break;
288 }
289 }
290
291 timer.startTimer();
292 if (requiresSilhouetteCandidates) {
293 const vpHomogeneousMatrix cTcp = m_cMo * m_cMoPrev.inverse();
295 input.renders.silhouetteCanny, input.renders.isSilhouette, input.cam, cTcp);
296 if (input.silhouettePoints.size() == 0) {
297 result.setStoppingReason(vpRBTrackingStoppingReason::OBJECT_NOT_IN_IMAGE);
298 return result;
299 }
300 }
301 timer.setSilhouetteTime(timer.endTimer());
302
303 int id = 0;
304 for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
305 timer.startTimer();
306 tracker->onTrackingIterStart(input, m_cMo);
307 timer.setTrackerIterStartTime(id, timer.endTimer());
308 id += 1;
309 }
310
311
313 // Perform odometry: estimate camera motion and potentially update render to match
314 if (m_odometry) {
315 timer.startTimer();
316 m_odometry->compute(input, m_previousFrame);
317 vpHomogeneousMatrix cMo_beforeOdo = m_cMo;
318 vpHomogeneousMatrix cnTc = m_odometry->getCameraMotion();
319 m_cMo = cnTc * m_cMo;
320 bool shouldRerender = true;
321
322 if (m_cMo != cMo_beforeOdo && m_convergenceMetric) {
323 const double metric = (*m_convergenceMetric)(m_cam, m_cMo, cMo_beforeOdo);
324 shouldRerender = m_convergenceMetric && (metric > m_convergenceMetric->getUpdateRenderThreshold());
325 result.setOdometryMetricAndThreshold(metric, m_convergenceMetric->getUpdateRenderThreshold());
326 }
327 result.setOdometryMotion(cMo_beforeOdo, cnTc, m_cMo);
328 if (shouldRerender) {
329 updateRender(input);
330 if (requiresSilhouetteCandidates) {
332 input.renders.silhouetteCanny, input.renders.isSilhouette, input.cam, cnTc);
333 if (input.silhouettePoints.size() == 0) {
334 result.setStoppingReason(vpRBTrackingStoppingReason::OBJECT_NOT_IN_IMAGE);
335 return result;
336 }
337 }
338
339 }
340 timer.setOdometryTime(timer.endTimer());
341 }
342
343
344 id = 0;
345 for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
346
347 timer.startTimer();
348 try {
349 tracker->extractFeatures(input, m_previousFrame, m_cMo);
350 }
351 catch (vpException &e) {
352 std::cerr << "Tracker " << id << " raised an exception in extractFeatures" << std::endl;
353 throw e;
354 }
355 timer.setTrackerFeatureExtractionTime(id, timer.endTimer());
356 id += 1;
357 }
358 id = 0;
359 for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
360 timer.startTimer();
361 try {
362 tracker->trackFeatures(input, m_previousFrame, m_cMo);
363 }
364 catch (vpException &e) {
365 std::cerr << "Tracker " << id << " raised an exception in trackFeatures" << std::endl;
366 throw e;
367 }
368 timer.setTrackerFeatureTrackingTime(id, timer.endTimer());
369 id += 1;
370 }
371
372 id = 0;
373 for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
374 timer.startTimer();
375 tracker->initVVS(input, m_previousFrame, m_cMo);
376 timer.setInitVVSTime(id, timer.endTimer());
377 id += 1;
378 }
379
380
381 double bestError = std::numeric_limits<double>::max();
382 vpHomogeneousMatrix m_cMoPrevIter = m_cMo;
383 vpHomogeneousMatrix best_cMo = m_cMo;
384 double mu = m_muInit;
385 unsigned int iter = 0;
386 result.beforeIter(m_cMo);
387 for (iter = 0; iter < m_vvsIterations; ++iter) {
388 id = 0;
389 for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
390 timer.startTimer();
391 try {
392 tracker->computeVVSIter(input, m_cMo, iter);
393 }
394 catch (vpException &e) {
395 std::cerr << "Tracker " << id << " raised an exception in computeVVSIter" << std::endl;
396 throw e;
397 }
398 timer.addTrackerVVSTime(id, timer.endTimer());
399 id += 1;
400 }
401
402 vpMatrix LTL(6, 6, 0.0);
403 vpColVector LTR(6, 0.0);
404 double error = 0.f;
405 unsigned int numFeatures = 0;
406
407 bool shouldComputeVelocityInObjectFrame = false;
408 for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
409 if (tracker->hasIgnoredDofs()) {
410 shouldComputeVelocityInObjectFrame = true;
411 break;
412 }
413 }
414
415 for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
416 tracker->setComputeJacobianObjectSpace(shouldComputeVelocityInObjectFrame);
417 }
418 id = 0;
419 for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
420 if (tracker->getNumFeatures() > 0) {
421 numFeatures += tracker->getNumFeatures();
422 const double weight = tracker->getVVSTrackerWeight(static_cast<double>(iter) / static_cast<double>(m_vvsIterations));
423 bool validQuantities = vpArray2D<double>::isFinite(tracker->getLTL()) && vpArray2D<double>::isFinite(tracker->getLTR()) && vpMath::isFinite(weight);
424 if (validQuantities) {
425 LTL += weight * tracker->getLTL();
426 LTR += weight * tracker->getLTR();
427 error += weight * (tracker->getWeightedError()).sumSquare();
428 }
429 else {
430 std::cerr << std::endl;
431 std::cerr << "There were NaN values in tracker " << id << std::endl;
432 std::cerr << "LTL = " << tracker->getLTL() << std::endl;
433 std::cerr << "LTR = " << tracker->getLTR().t() << std::endl;
434 std::cerr << "Weight = " << weight << std::endl;
435 std::cerr << "This tracker was ignored." << std::endl;
436
437 }
438 }
439 id += 1;
440 }
441
442 if (numFeatures >= 6) {
443
444 if (error < bestError) {
445 bestError = error;
446 best_cMo = m_cMo;
447 }
448
449 vpMatrix H(6, 6);
450 H.eye(6);
451
453 for (unsigned int i = 0; i < 6; ++i) {
454 H[i][i] = LTL[i][i];
455 }
456 }
457 vpColVector v;
458 try {
459 v = -m_lambda * ((LTL + mu * H).pseudoInverse(LTL.getRows() * std::numeric_limits<double>::epsilon()) * LTR);
460
461 if (shouldComputeVelocityInObjectFrame) {
463 v = cVo * v;
464 }
465
467 }
468 catch (vpException &) {
469 result.setStoppingReason(vpRBTrackingStoppingReason::EXCEPTION);
470 std::cerr << "Could not compute pseudo inverse" << std::endl;
471 break;
472 }
473
475
476 double convergenceMetricValue = 0.0;
477 bool converged = false;
478 if (m_convergenceMetric && iter > 0) {
479 convergenceMetricValue = (*m_convergenceMetric)(m_cam, m_cMoPrevIter, m_cMo);
480 if (convergenceMetricValue < m_convergenceMetric->getConvergenceThreshold()) {
481 converged = true;
482 }
483 }
484 result.onEndIter(m_cMo, v, convergenceMetricValue, LTL, LTR, mu);
485 if (converged) {
486 result.setStoppingReason(vpRBTrackingStoppingReason::CONVERGENCE_CRITERION);
487 break;
488 }
489
490 mu *= m_muIterFactor;
491 m_cMoPrevIter = m_cMo;
492 }
493 else {
494 result.setStoppingReason(vpRBTrackingStoppingReason::NOT_ENOUGH_FEATURES);
495 break;
496 }
497 }
498
499
500
501 if (iter == m_vvsIterations) {
502 result.setStoppingReason(vpRBTrackingStoppingReason::MAX_ITERS);
503 }
504
505 //m_cMo = best_cMo;
506
507 for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
508 tracker->onTrackingIterEnd(m_cMo);
509 }
510 //m_cMo = m_kalman.filter(m_cMo, 1.0 / 20.0);
511 if (m_currentFrame.I.getSize() == 0) {
512 m_currentFrame = input;
513 m_previousFrame = input;
514 }
515 else {
516 m_previousFrame = std::move(m_currentFrame);
517 m_currentFrame = std::move(input);
518 }
519 timer.startTimer();
520 if (m_driftDetector) {
522 }
523 timer.setDriftDetectionTime(timer.endTimer());
524
525 return result;
526}
527
528
529
531{
533 frame.I = I;
534 frame.IRGB = IRGB;
535 frame.depth = depth;
536
537 updateRender(frame, cMo);
538
539 if (m_driftDetector == nullptr) {
540 throw vpException(vpException::functionNotImplementedError, "Scoring relies on drift detector");
541 }
542 return m_driftDetector->score(frame, cMo);
543}
544
545
550
552{
553 m_renderer.setCameraPose(cMo.inverse());
554
555
556 frame.renders.cMo = cMo;
557
558 // Update clipping distances
562
563 float clipNear, clipFar;
564 m_renderer.computeClipping(clipNear, clipFar);
565 frame.renders.zNear = std::max(0.001f, clipNear);
566 frame.renders.zFar = clipFar;
567
568 {
569 vpTranslationVector tMin, tMax;
570 m_renderer.get3DExtents(tMin, tMax);
571 double diameter = (tMax - tMin).frobeniusNorm();
572 frame.renders.objectDiameter = diameter;
573
574 vpTranslationVector center((tMax - tMin) / 2.0);
575 frame.renders.objectCenter = center;
576 }
577 m_rendererSettings.setClippingDistance(frame.renders.zNear, frame.renders.zFar);
578 m_renderer.setRenderParameters(m_rendererSettings);
579
580 bool renderSilhouette = shouldRenderSilhouette();
581 if (renderSilhouette) {
582 // For silhouette extraction, update depth difference threshold
583 double thresholdValue = m_depthSilhouetteSettings.getThreshold();
584 if (m_depthSilhouetteSettings.thresholdIsRelative()) {
585 m_renderer.getRenderer<vpPanda3DDepthCannyFilter>()->setEdgeThreshold((frame.renders.zFar - frame.renders.zNear) * thresholdValue);
586 }
587 else {
588 m_renderer.getRenderer<vpPanda3DDepthCannyFilter>()->setEdgeThreshold(thresholdValue);
589 }
590 }
591
592 // Call Panda renderer
593 m_renderer.renderFrame();
594
595 frame.renders.boundingBox = m_renderer.getBoundingBox();
596
597 // Extract data from Panda textures
598#ifdef VISP_HAVE_OPENMP
599#pragma omp parallel sections
600#endif
601
602 {
603#ifdef VISP_HAVE_OPENMP
604#pragma omp section
605#endif
606 {
607 m_renderer.getRenderer<vpPanda3DGeometryRenderer>()->getRender(
608 frame.renders.normals,
609 frame.renders.depth,
610 frame.renders.boundingBox,
612 }
613#ifdef VISP_HAVE_OPENMP
614#pragma omp section
615#endif
616 {
617 if (renderSilhouette) {
618 m_renderer.getRenderer<vpPanda3DDepthCannyFilter>()->getRender(
620 frame.renders.isSilhouette,
621 frame.renders.boundingBox,
623 }
624 }
625 // #pragma omp section
626 // {
627 // vpImage<vpRGBa> renders.color;
628 // m_renderer.getRenderer<vpPanda3DRGBRenderer>()->getRender(renders.color);
629 // m_renderer.placeRendernto(renders.color, frame.renders.color, vpRGBa(0));
630 // }
631 }
632}
633
634std::vector<vpRBSilhouettePoint>
636 const vpImage<float> &silhouetteCanny, const vpImage<unsigned char> &Ivalid,
637 const vpCameraParameters &cam, const vpHomogeneousMatrix &cTcp)
638{
639 std::vector<std::pair<unsigned int, unsigned int>> candidates =
640 m_depthSilhouetteSettings.getSilhouetteCandidates(Ivalid, Idepth, cam, cTcp, m_previousFrame.silhouettePoints, 42);
641
642 std::vector<vpRBSilhouettePoint> points;
643 vpColVector norm(3);
644
645 for (unsigned int i = 0; i < candidates.size(); ++i) {
646 unsigned int n = candidates[i].first, m = candidates[i].second;
647 double theta = silhouetteCanny[n][m];
648 if (std::isnan(theta)) {
649 continue;
650 }
651
652 norm[0] = Inorm[n][m].R;
653 norm[1] = Inorm[n][m].G;
654 norm[2] = Inorm[n][m].B;
655 const double l = std::sqrt(norm[0] * norm[0] + norm[1] * norm[1] + norm[2] * norm[2]);
656 if (l > 1e-1) {
657 norm.normalize();
658 const double Z = Idepth[n][m];
659#if defined(VISP_DEBUG_RB_TRACKER)
660 if (fabs(theta) > M_PI + 1e-6) {
661 throw vpException(vpException::badValue, "Theta expected to be in -Pi, Pi range but was not");
662 }
663#endif
664 vpRBSilhouettePoint p(n, m, norm, theta, Z);
665 p.detectSilhouette(Idepth);
666 points.push_back(p);
667 }
668 }
669 return points;
670}
671
672
673void vpRBTracker::addTracker(std::shared_ptr<vpRBFeatureTracker> tracker)
674{
675 if (tracker == nullptr) {
676 throw vpException(vpException::badValue, "Adding tracker: tracker cannot be null");
677 }
678 m_trackers.push_back(tracker);
679}
680
682{
683 if (m_mask) {
684 m_mask->display(m_currentFrame.mask, Imask);
685 }
686}
687
689{
690 if (m_currentFrame.renders.normals.getSize() == 0) {
691 return;
692 }
693
696 }
697
698 for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
699 if (tracker->featuresShouldBeDisplayed()) {
700 tracker->display(m_currentFrame.cam, I, IRGB, depth);
701 }
702 }
703}
704
709
710#if defined(VISP_HAVE_NLOHMANN_JSON)
711void vpRBTracker::loadConfigurationFile(const std::string &filename)
712{
713 std::ifstream jsonFile(filename);
714 if (!jsonFile.good()) {
715 throw vpException(vpException::ioError, "Could not read from settings file " + filename + " to initialize the RBTracker");
716 }
717 nlohmann::json settings;
718 try {
719 settings = nlohmann::json::parse(jsonFile);
720 }
721 catch (nlohmann::json::parse_error &e) {
722 std::stringstream msg;
723 msg << "Could not parse JSON file : \n";
724 msg << e.what() << std::endl;
725 msg << "Byte position of error: " << e.byte;
726 throw vpException(vpException::ioError, msg.str());
727 }
728 loadConfiguration(settings);
729 jsonFile.close();
730}
731
732void vpRBTracker::loadConfiguration(const nlohmann::json &j)
733{
734 m_firstIteration = true;
735 m_trackers.clear();
736 m_convergenceMetric = nullptr;
737 m_odometry = nullptr;
738 m_driftDetector = nullptr;
739 m_mask = nullptr;
740
741 m_displaySilhouette = j.value("displaySilhouette", m_displaySilhouette);
742
743 if (j.contains("metric")) {
745 }
746
747 if (j.contains("camera")) {
748 const nlohmann::json cameraSettings = j.at("camera");
749 m_cam = cameraSettings.at("intrinsics");
750 m_imageHeight = cameraSettings.value("height", m_imageHeight);
751 m_imageWidth = cameraSettings.value("width", m_imageWidth);
753 }
754
755 if (j.contains("model")) {
756 setModelPath(j.at("model"));
757 }
758
759 const nlohmann::json vvsSettings = j.at("vvs");
760 setMaxOptimizationIters(vvsSettings.value("maxIterations", m_vvsIterations));
761 setOptimizationGain(vvsSettings.value("gain", m_lambda));
762 setOptimizationInitialMu(vvsSettings.value("mu", m_muInit));
763 setOptimizationMuIterFactor(vvsSettings.value("muIterFactor", m_muIterFactor));
764 setScaleInvariantRegularization(vvsSettings.value("scaleInvariant", m_scaleInvariantOptim));
765
766 m_depthSilhouetteSettings = j.at("silhouetteExtractionSettings");
767
768 m_trackers.clear();
769 const nlohmann::json features = j.at("features");
771 for (const nlohmann::json &trackerSettings: features) {
772 std::shared_ptr<vpRBFeatureTracker> tracker = featureFactory.buildFromJson(trackerSettings);
773 if (tracker == nullptr) {
774 throw vpException(
776 "Cannot instantiate subtracker with the current settings, make sure that the type is registered. Settings: %s",
777 trackerSettings.dump(2).c_str()
778 );
779 }
780 m_trackers.push_back(tracker);
781 }
782
783 if (j.contains("mask")) {
785 const nlohmann::json maskSettings = j.at("mask");
786 m_mask = maskFactory.buildFromJson(maskSettings);
787 if (m_mask == nullptr) {
788 throw vpException(
790 "Cannot instantiate object mask with the current settings, make sure that the type is registered. Settings: %s",
791 maskSettings.dump(2).c_str());
792 }
793 }
794 if (j.contains("drift")) {
796 const nlohmann::json driftSettings = j.at("drift");
797 m_driftDetector = factory.buildFromJson(driftSettings);
798 if (m_driftDetector == nullptr) {
799 throw vpException(
801 "Cannot instantiate drift detection with the current settings, make sure that the type is registered in the factory"
802 );
803 }
804 }
805}
806#endif
807
808END_VISP_NAMESPACE
unsigned int getCols() const
Definition vpArray2D.h:423
static bool isFinite(const vpArray2D< double > &A)
Definition vpArray2D.h:1188
unsigned int getRows() const
Definition vpArray2D.h:433
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
Implementation of column vector and the associated operations.
std::shared_ptr< T > buildFromJson(const nlohmann::json &j)
error that can be emitted by ViSP classes.
Definition vpException.h:60
@ ioError
I/O error.
Definition vpException.h:67
@ badValue
Used to indicate that a value is not in the allowed range.
Definition vpException.h:73
@ functionNotImplementedError
Function not implemented.
Definition vpException.h:66
@ dimensionError
Bad dimension.
Definition vpException.h:71
static vpHomogeneousMatrix direct(const vpColVector &v)
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
Definition of the vpImage class member functions.
Definition vpImage.h:131
void resize(unsigned int h, unsigned int w)
resize the image : Image initialization
Definition vpImage.h:544
static bool checkFilename(const std::string &filename)
static bool isFinite(double value)
Definition vpMath.cpp:198
Implementation of a matrix and operations on matrices.
Definition vpMatrix.h:175
vpMatrix pseudoInverse(double svThreshold=1e-6) const
Single object focused renderer.
A factory that can be used to create Object segmentation algorithms from JSON data.
static vpObjectMaskFactory & getFactory()
Class representing an ambient light.
Implementation of canny filtering, using Sobel kernels.
static vpPanda3DFrameworkManager & getInstance()
void enableSingleRenderer(vpPanda3DBaseRenderer &renderer)
Renderer that outputs object geometric information.
static std::shared_ptr< vpRBConvergenceMetric > loadFromJSON(const nlohmann::json &j)
A factory that can be used to instantiate drift detection algorithms from JSON data.
static vpRBDriftDetectorFactory & getFactory()
A factory to instantiate feature trackers from JSON data.
static vpRBFeatureTrackerFactory & getFactory()
All the data related to a single tracking frame. This contains both the input data (from a real camer...
vpImage< vpRGBa > IRGB
Image luminance.
vpImage< unsigned char > I
std::vector< vpRBSilhouettePoint > silhouettePoints
vpImage< float > mask
depth image, 0 sized if depth is not available
vpImage< float > depth
RGB image, 0 sized if RGB is not available.
vpRBRenderData renders
camera parameters
Silhouette point simple candidate representation.
bool m_displaySilhouette
Metric used to compare the motion between different poses.
std::shared_ptr< vpRBDriftDetector > m_driftDetector
std::shared_ptr< vpRBVisualOdometry > m_odometry
vpSilhouettePointsExtractionSettings m_depthSilhouetteSettings
Settings for silhouette extraction.
vpCameraParameters m_cam
Camera intrinsics.
void checkDimensionsOrThrow(const vpImage< T > &I, const std::string &imgType) const
Check that a given image has the correct dimensions, previously specified with setCameraParameters or...
vpObjectCentricRenderer m_renderer
3D renderer
vpCameraParameters getCameraParameters() const
Get the camera intrinsics that are used to render the 3D object and process the tracked frames.
unsigned m_vvsIterations
Maximum number of optimization iterations.
bool m_firstIteration
Whether this is the first iteration.
void setScaleInvariantRegularization(bool invariant)
double m_muIterFactor
Factor with which to multiply mu at every iteration during optimization.
void updateRender(vpRBFeatureTrackerInput &frame)
Update the render data with a render at the last tracked pose.
std::vector< std::shared_ptr< vpRBFeatureTracker > > m_trackers
List of feature trackers.
void setModelPath(const std::string &path)
Set the path to the 3D model to load.
std::string m_modelPath
Location of the 3D model to load.
vpPanda3DRenderParameters m_rendererSettings
Camera specific setup for the 3D Panda renderer.
void displayMask(vpImage< unsigned char > &Imask) const
Convert the mask output by the object segmentation method to a displayable representation....
std::shared_ptr< vpRBConvergenceMetric > m_convergenceMetric
unsigned m_imageHeight
Color and render image dimensions.
std::vector< vpRBSilhouettePoint > extractSilhouettePoints(const vpImage< vpRGBf > &Inorm, const vpImage< float > &Idepth, const vpImage< float > &Ior, const vpImage< unsigned char > &Ivalid, const vpCameraParameters &cam, const vpHomogeneousMatrix &cTcp)
Converts from pixelwise object silhouette representation to an actionable list of silhouette points.
double m_lambda
Optimization gain.
void getPose(vpHomogeneousMatrix &cMo) const
Get the estimated pose of the object in the camera frame.
std::shared_ptr< vpObjectMask > m_mask
double score(const vpHomogeneousMatrix &cMo, const vpImage< unsigned char > &I, const vpImage< vpRGBa > &IRGB, const vpImage< float > &depth)
Using the preconfigured drift detector, score a given pose by comparing the render with the given fra...
vpRBTrackingResult track(const vpImage< unsigned char > &I)
track and re-estimate the pose of the object in this frame, given only a grayscale image The pose aft...
vpHomogeneousMatrix m_cMoPrev
Previous pose of the object in the camera frame.
bool shouldRenderSilhouette()
Returns whether the renderer should render the silhouette information.
void loadConfiguration(const nlohmann::json &j)
Update the tracker with a new json configuration.
void addTracker(std::shared_ptr< vpRBFeatureTracker > tracker)
Add a new feature to track.
vpRBFeatureTrackerInput m_currentFrame
void loadConfigurationFile(const std::string &filename)
Update tracker settings from a .json file. See the tutorials for a full description of the json forma...
bool m_rendererIsSetup
void setupRenderer(const std::string &file)
Setup the renderer, and load the 3D model.
void display(const vpImage< unsigned char > &I, const vpImage< vpRGBa > &IRGB, const vpImage< unsigned char > &depth)
Displays tracker information such as current pose, object silhouette and tracked features on display ...
unsigned m_imageWidth
vpRBFeatureTrackerInput m_previousFrame
void setSilhouetteExtractionParameters(const vpSilhouettePointsExtractionSettings &settings)
vpMatrix getCovariance() const
vpObjectCentricRenderer & getRenderer()
Get the renderer used to render the object.
void setOptimizationMuIterFactor(double factor)
void setOptimizationInitialMu(double mu)
bool m_modelChanged
void setOptimizationGain(double lambda)
double m_muInit
Initial mu value for Levenberg-Marquardt.
void setPose(const vpHomogeneousMatrix &cMo)
Sets the pose of the object in the camera frame. Should be called when initializing the tracker or wh...
vpHomogeneousMatrix m_cMo
Current pose of the object in the camera frame.
bool m_scaleInvariantOptim
Whether to use diagonal scaling in Levenberg-Marquardt regularization.
void setMaxOptimizationIters(unsigned int iters)
void startTracking()
Method that should be called before starting tracking.
void setCameraParameters(const vpCameraParameters &cam, unsigned h, unsigned w)
Sets the camera intrinsics and image resolution for the images where the object will be tracked.
void displaySilhouette(const vpImage< T > &I, const vpRBFeatureTrackerInput &frame)
Display the object silhouette of the frame in I.
void setOdometryMetricAndThreshold(const double metricValue, const double metricThreshold)
void setStoppingReason(vpRBTrackingStoppingReason reason)
void setOdometryMotion(const vpHomogeneousMatrix &cMo_before, const vpHomogeneousMatrix &cMcp, const vpHomogeneousMatrix &cMo_after)
vpRBTrackingTimings & timer()
void onEndIter(const vpHomogeneousMatrix &cMo, const vpColVector &v, const double convergenceMetric, const vpMatrix &JTJ, const vpColVector &JTR, double mu)
void beforeIter(const vpHomogeneousMatrix &cMo)
void logFeatures(unsigned int iter, unsigned int maxIters, const std::vector< std::shared_ptr< vpRBFeatureTracker > > &features)
void setSilhouetteTime(double elapsed)
void setDriftDetectionTime(double elapsed)
void setTrackerFeatureTrackingTime(int id, double elapsed)
void addTrackerVVSTime(int id, double elapsed)
void setTrackerIterStartTime(int id, double elapsed)
void setMaskTime(double elapsed)
void setInitVVSTime(int id, double elapsed)
void setRenderTime(double elapsed)
void setTrackerFeatureExtractionTime(int id, double elapsed)
void setOdometryTime(double elapsed)
Class that consider the case of a translation vector.
double zNear
Binary image indicating whether a given pixel is part of the silhouette.
vpImage< float > depth
Image containing the per-pixel normal vector (RGB, in object space).
vpHomogeneousMatrix cMo
vpImage< vpRGBf > normals
vpImage< unsigned char > isSilhouette
Image containing the orientation of the gradients.
vpImage< float > silhouetteCanny
vpTranslationVector objectCenter
Center of the 3D bounding box of the object. Expressed in the object frame.