31#include <visp3/rbt/vpRBTracker.h>
33#if defined(VISP_HAVE_NLOHMANN_JSON)
34#include VISP_NLOHMANN_JSON(json.hpp)
37#include <visp3/core/vpExponentialMap.h>
38#include <visp3/core/vpIoTools.h>
40#include <visp3/ar/vpPanda3DRendererSet.h>
41#include <visp3/ar/vpPanda3DGeometryRenderer.h>
42#include <visp3/ar/vpPanda3DRGBRenderer.h>
43#include <visp3/ar/vpPanda3DFrameworkManager.h>
45#include <visp3/rbt/vpRBFeatureTrackerFactory.h>
46#include <visp3/rbt/vpRBDriftDetectorFactory.h>
47#include <visp3/rbt/vpObjectMaskFactory.h>
48#include <visp3/rbt/vpRBVisualOdometry.h>
49#include <visp3/rbt/vpRBInitializationHelper.h>
81 double sumWeights = 0.0;
82 for (
const std::shared_ptr<vpRBFeatureTracker> &tracker:
m_trackers) {
83 if (tracker->getNumFeatures() == 0) {
87 vpMatrix trackerCov = tracker->getCovariance();
88 double trackerWeight = tracker->getVVSTrackerWeight(1.0);
91 "Expected tracker pose covariance to have dimensions 6x6, but got %dx%d",
96 sumWeights += trackerWeight;
107 "Camera model cannot have distortion. Undistort images before tracking and use the undistorted camera model");
109 if (h == 0 || w == 0) {
112 "Image dimensions must be greater than 0"
134 for (std::shared_ptr<vpRBFeatureTracker> &tracker:
m_trackers) {
158 const std::shared_ptr<vpPanda3DGeometryRenderer> geometryRenderer = std::make_shared<vpPanda3DGeometryRenderer>(
168 bool requiresSilhouetteShader =
false;
169 for (std::shared_ptr<vpRBFeatureTracker> &tracker:
m_trackers) {
170 if (tracker->requiresSilhouetteCandidates()) {
171 requiresSilhouetteShader =
true;
176 static int cannyId = 0;
178 m_renderer.addSubRenderer(std::make_shared<vpPanda3DDepthCannyFilter>(
200 for (std::shared_ptr<vpRBFeatureTracker> tracker :
m_trackers) {
201 if (tracker->requiresDepth() || tracker->requiresRGB()) {
209 return track(frameInput);
214 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
215 if (tracker->requiresDepth()) {
223 frameInput.
IRGB = IRGB;
225 return track(frameInput);
246 frameInput.
IRGB = IRGB;
247 frameInput.
depth = depth;
249 return track(frameInput);
283 bool requiresSilhouetteCandidates =
false;
284 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
285 if (tracker->requiresSilhouetteCandidates()) {
286 requiresSilhouetteCandidates =
true;
292 if (requiresSilhouetteCandidates) {
304 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
306 tracker->onTrackingIterStart(input,
m_cMo);
320 bool shouldRerender =
true;
323 const double metric = (*m_convergenceMetric)(
m_cam,
m_cMo, cMo_beforeOdo);
328 if (shouldRerender) {
330 if (requiresSilhouetteCandidates) {
345 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
352 std::cerr <<
"Tracker " <<
id <<
" raised an exception in extractFeatures" << std::endl;
359 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
365 std::cerr <<
"Tracker " <<
id <<
" raised an exception in trackFeatures" << std::endl;
373 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
381 double bestError = std::numeric_limits<double>::max();
385 unsigned int iter = 0;
389 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
392 tracker->computeVVSIter(input,
m_cMo, iter);
395 std::cerr <<
"Tracker " <<
id <<
" raised an exception in computeVVSIter" << std::endl;
405 unsigned int numFeatures = 0;
407 bool shouldComputeVelocityInObjectFrame =
false;
408 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
409 if (tracker->hasIgnoredDofs()) {
410 shouldComputeVelocityInObjectFrame =
true;
415 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
416 tracker->setComputeJacobianObjectSpace(shouldComputeVelocityInObjectFrame);
419 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
420 if (tracker->getNumFeatures() > 0) {
421 numFeatures += tracker->getNumFeatures();
422 const double weight = tracker->getVVSTrackerWeight(
static_cast<double>(iter) /
static_cast<double>(
m_vvsIterations));
424 if (validQuantities) {
425 LTL += weight * tracker->getLTL();
426 LTR += weight * tracker->getLTR();
427 error += weight * (tracker->getWeightedError()).sumSquare();
430 std::cerr << std::endl;
431 std::cerr <<
"There were NaN values in tracker " <<
id << std::endl;
432 std::cerr <<
"LTL = " << tracker->getLTL() << std::endl;
433 std::cerr <<
"LTR = " << tracker->getLTR().t() << std::endl;
434 std::cerr <<
"Weight = " << weight << std::endl;
435 std::cerr <<
"This tracker was ignored." << std::endl;
442 if (numFeatures >= 6) {
444 if (error < bestError) {
453 for (
unsigned int i = 0; i < 6; ++i) {
459 v = -
m_lambda * ((LTL + mu * H).pseudoInverse(LTL.
getRows() * std::numeric_limits<double>::epsilon()) * LTR);
461 if (shouldComputeVelocityInObjectFrame) {
470 std::cerr <<
"Could not compute pseudo inverse" << std::endl;
476 double convergenceMetricValue = 0.0;
477 bool converged =
false;
479 convergenceMetricValue = (*m_convergenceMetric)(
m_cam, m_cMoPrevIter,
m_cMo);
480 if (convergenceMetricValue < m_convergenceMetric->getConvergenceThreshold()) {
491 m_cMoPrevIter =
m_cMo;
507 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
508 tracker->onTrackingIterEnd(
m_cMo);
563 float clipNear, clipFar;
564 m_renderer.computeClipping(clipNear, clipFar);
571 double diameter = (tMax - tMin).frobeniusNorm();
581 if (renderSilhouette) {
598#ifdef VISP_HAVE_OPENMP
599#pragma omp parallel sections
603#ifdef VISP_HAVE_OPENMP
613#ifdef VISP_HAVE_OPENMP
617 if (renderSilhouette) {
634std::vector<vpRBSilhouettePoint>
639 std::vector<std::pair<unsigned int, unsigned int>> candidates =
642 std::vector<vpRBSilhouettePoint> points;
645 for (
unsigned int i = 0; i < candidates.size(); ++i) {
646 unsigned int n = candidates[i].first, m = candidates[i].second;
647 double theta = silhouetteCanny[n][m];
648 if (std::isnan(theta)) {
652 norm[0] = Inorm[n][m].R;
653 norm[1] = Inorm[n][m].G;
654 norm[2] = Inorm[n][m].B;
655 const double l = std::sqrt(norm[0] * norm[0] + norm[1] * norm[1] + norm[2] * norm[2]);
658 const double Z = Idepth[n][m];
659#if defined(VISP_DEBUG_RB_TRACKER)
660 if (fabs(theta) > M_PI + 1e-6) {
665 p.detectSilhouette(Idepth);
675 if (tracker ==
nullptr) {
698 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
699 if (tracker->featuresShouldBeDisplayed()) {
710#if defined(VISP_HAVE_NLOHMANN_JSON)
713 std::ifstream jsonFile(filename);
714 if (!jsonFile.good()) {
717 nlohmann::json settings;
719 settings = nlohmann::json::parse(jsonFile);
721 catch (nlohmann::json::parse_error &e) {
722 std::stringstream msg;
723 msg <<
"Could not parse JSON file : \n";
724 msg << e.what() << std::endl;
725 msg <<
"Byte position of error: " << e.byte;
743 if (j.contains(
"metric")) {
747 if (j.contains(
"camera")) {
748 const nlohmann::json cameraSettings = j.at(
"camera");
749 m_cam = cameraSettings.at(
"intrinsics");
755 if (j.contains(
"model")) {
759 const nlohmann::json vvsSettings = j.at(
"vvs");
769 const nlohmann::json features = j.at(
"features");
771 for (
const nlohmann::json &trackerSettings: features) {
772 std::shared_ptr<vpRBFeatureTracker> tracker = featureFactory.
buildFromJson(trackerSettings);
773 if (tracker ==
nullptr) {
776 "Cannot instantiate subtracker with the current settings, make sure that the type is registered. Settings: %s",
777 trackerSettings.dump(2).c_str()
783 if (j.contains(
"mask")) {
785 const nlohmann::json maskSettings = j.at(
"mask");
790 "Cannot instantiate object mask with the current settings, make sure that the type is registered. Settings: %s",
791 maskSettings.dump(2).c_str());
794 if (j.contains(
"drift")) {
796 const nlohmann::json driftSettings = j.at(
"drift");
801 "Cannot instantiate drift detection with the current settings, make sure that the type is registered in the factory"
unsigned int getCols() const
static bool isFinite(const vpArray2D< double > &A)
unsigned int getRows() const
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
Implementation of column vector and the associated operations.
std::shared_ptr< T > buildFromJson(const nlohmann::json &j)
error that can be emitted by ViSP classes.
@ badValue
Used to indicate that a value is not in the allowed range.
@ functionNotImplementedError
Function not implemented.
@ dimensionError
Bad dimension.
static vpHomogeneousMatrix direct(const vpColVector &v)
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
Definition of the vpImage class member functions.
void resize(unsigned int h, unsigned int w)
resize the image : Image initialization
static bool isFinite(double value)
Implementation of a matrix and operations on matrices.
vpMatrix pseudoInverse(double svThreshold=1e-6) const
Single object focused renderer.
A factory that can be used to create object segmentation algorithms from JSON data.
static vpObjectMaskFactory & getFactory()
Class representing an ambient light.
Implementation of canny filtering, using Sobel kernels.
static vpPanda3DFrameworkManager & getInstance()
void enableSingleRenderer(vpPanda3DBaseRenderer &renderer)
Renderer that outputs object geometric information.
static std::shared_ptr< vpRBConvergenceMetric > loadFromJSON(const nlohmann::json &j)
A factory that can be used to instantiate drift detection algorithms from JSON data.
static vpRBDriftDetectorFactory & getFactory()
A factory to instantiate feature trackers from JSON data.
static vpRBFeatureTrackerFactory & getFactory()
Silhouette point simple candidate representation.
bool m_displaySilhouette
Metric used to compare the motion between different poses.
std::shared_ptr< vpRBDriftDetector > m_driftDetector
std::shared_ptr< vpRBVisualOdometry > m_odometry
vpSilhouettePointsExtractionSettings m_depthSilhouetteSettings
Settings for silhouette extraction.
vpCameraParameters m_cam
Camera intrinsics.
void checkDimensionsOrThrow(const vpImage< T > &I, const std::string &imgType) const
Check that a given image has the correct dimensions, previously specified with setCameraParameters or...
vpObjectCentricRenderer m_renderer
3D renderer
vpCameraParameters getCameraParameters() const
Get the camera intrinsics that are used to render the 3D object and process the tracked frames.
unsigned m_vvsIterations
Maximum number of optimization iterations.
bool m_firstIteration
Whether this is the first iteration.
void setScaleInvariantRegularization(bool invariant)
double m_muIterFactor
Factor with which to multiply mu at every iteration during optimization.
void updateRender(vpRBFeatureTrackerInput &frame)
Update the render data with a render at the last tracked pose.
std::vector< std::shared_ptr< vpRBFeatureTracker > > m_trackers
List of feature trackers.
void setModelPath(const std::string &path)
Set the path to the 3D model to load.
std::string m_modelPath
Location of the 3D model to load.
vpPanda3DRenderParameters m_rendererSettings
Camera specific setup for the 3D Panda renderer.
void displayMask(vpImage< unsigned char > &Imask) const
Convert the mask output by the object segmentation method to a displayable representation....
std::shared_ptr< vpRBConvergenceMetric > m_convergenceMetric
unsigned m_imageHeight
Color and render image dimensions.
std::vector< vpRBSilhouettePoint > extractSilhouettePoints(const vpImage< vpRGBf > &Inorm, const vpImage< float > &Idepth, const vpImage< float > &Ior, const vpImage< unsigned char > &Ivalid, const vpCameraParameters &cam, const vpHomogeneousMatrix &cTcp)
Converts from pixelwise object silhouette representation to an actionable list of silhouette points.
double m_lambda
Optimization gain.
void getPose(vpHomogeneousMatrix &cMo) const
Get the estimated pose of the object in the camera frame.
std::shared_ptr< vpObjectMask > m_mask
double score(const vpHomogeneousMatrix &cMo, const vpImage< unsigned char > &I, const vpImage< vpRGBa > &IRGB, const vpImage< float > &depth)
Using the preconfigured drift detector, score a given pose by comparing the render with the given fra...
vpRBTrackingResult track(const vpImage< unsigned char > &I)
track and re-estimate the pose of the object in this frame, given only a grayscale image The pose aft...
vpHomogeneousMatrix m_cMoPrev
Previous pose of the object in the camera frame.
bool shouldRenderSilhouette()
Returns whether the renderer should render the silhouette information.
void loadConfiguration(const nlohmann::json &j)
Update the tracker with a new json configuration.
void addTracker(std::shared_ptr< vpRBFeatureTracker > tracker)
Add a new feature to track.
vpRBFeatureTrackerInput m_currentFrame
void loadConfigurationFile(const std::string &filename)
Update tracker settings from a .json file. See the tutorials for a full description of the json forma...
void setupRenderer(const std::string &file)
Setup the renderer, and load the 3D model.
void display(const vpImage< unsigned char > &I, const vpImage< vpRGBa > &IRGB, const vpImage< unsigned char > &depth)
Displays tracker information such as current pose, object silhouette and tracked features on display ...
vpRBFeatureTrackerInput m_previousFrame
void setSilhouetteExtractionParameters(const vpSilhouettePointsExtractionSettings &settings)
vpMatrix getCovariance() const
vpObjectCentricRenderer & getRenderer()
Get the renderer used to render the object.
void setOptimizationMuIterFactor(double factor)
void setOptimizationInitialMu(double mu)
void setOptimizationGain(double lambda)
double m_muInit
Initial mu value for Levenberg-Marquardt.
void setPose(const vpHomogeneousMatrix &cMo)
Sets the pose of the object in the camera frame. Should be called when initializing the tracker or wh...
vpHomogeneousMatrix m_cMo
Current pose of the object in the camera frame.
bool m_scaleInvariantOptim
Whether to use diagonal scaling in Levenberg-Marquardt regularization.
void setMaxOptimizationIters(unsigned int iters)
void startTracking()
Method that should be called before starting tracking.
void setCameraParameters(const vpCameraParameters &cam, unsigned h, unsigned w)
Sets the camera intrinsics and image resolution for the images where the object will be tracked.
void displaySilhouette(const vpImage< T > &I, const vpRBFeatureTrackerInput &frame)
Display the object silhouette of the frame in I.
void setOdometryMetricAndThreshold(const double metricValue, const double metricThreshold)
void setStoppingReason(vpRBTrackingStoppingReason reason)
void setOdometryMotion(const vpHomogeneousMatrix &cMo_before, const vpHomogeneousMatrix &cMcp, const vpHomogeneousMatrix &cMo_after)
vpRBTrackingTimings & timer()
void onEndIter(const vpHomogeneousMatrix &cMo, const vpColVector &v, const double convergenceMetric, const vpMatrix &JTJ, const vpColVector &JTR, double mu)
void beforeIter(const vpHomogeneousMatrix &cMo)
void logFeatures(unsigned int iter, unsigned int maxIters, const std::vector< std::shared_ptr< vpRBFeatureTracker > > &features)
void setSilhouetteTime(double elapsed)
void setDriftDetectionTime(double elapsed)
void setTrackerFeatureTrackingTime(int id, double elapsed)
void addTrackerVVSTime(int id, double elapsed)
void setTrackerIterStartTime(int id, double elapsed)
void setMaskTime(double elapsed)
void setInitVVSTime(int id, double elapsed)
void setRenderTime(double elapsed)
void setTrackerFeatureExtractionTime(int id, double elapsed)
void setOdometryTime(double elapsed)
Class that consider the case of a translation vector.
double zNear
Binary image indicating whether a given pixel is part of the silhouette.
vpImage< float > depth
Image containing the per-pixel normal vector (RGB, in object space).
vpImage< vpRGBf > normals
vpImage< unsigned char > isSilhouette
Image containing the orientation of the gradients.
vpImage< float > silhouetteCanny
vpTranslationVector objectCenter
Center of the 3D bounding box of the object. Expressed in the object frame.