32#include <visp3/core/vpConfig.h>
34#ifdef ENABLE_VISP_NAMESPACE
38#ifndef VISP_HAVE_REALSENSE2
42 std::cerr <<
"To run this tutorial, recompile ViSP with the Realsense third party library" << std::endl;
47#include <visp3/sensor/vpRealSense2.h>
48#include <visp3/io/vpParseArgv.h>
50#include <visp3/ar/vpPanda3DFrameworkManager.h>
52#include <visp3/rbt/vpRBTracker.h>
54#include "render-based-tutorial-utils.h"
56#ifndef DOXYGEN_SHOULD_SKIP_THIS
67 parser.addArgument(
"--height", height,
false,
"Realsense requested image height")
68 .addArgument(
"--width", width,
false,
"Realsense requested image width")
69 .addArgument(
"--fps", fps,
false,
"Realsense requested framerate");
87#ifdef VISP_HAVE_OPENMP
88#pragma omp parallel for
90 for (
int i = 0; i < static_cast<int>(depthRaw.
getSize()); ++
i) {
91 depth.bitmap[
i] = depthScale *
static_cast<float>(depthRaw.
bitmap[
i]);
92 IdepthDisplay.
bitmap[
i] =
depth.bitmap[
i] > maxZDisplay ? 0 :
static_cast<unsigned int>((
depth.bitmap[
i] / maxZDisplay) * 255.f);
96int main(
int argc,
const char **argv)
100 vpRBTrackerTutorial::BaseArguments baseArgs;
102 vpRBTrackerTutorial::vpRBExperimentLogger logger;
103 vpRBTrackerTutorial::vpRBExperimentPlotter
plotter;
107 "Tutorial showing the usage of the Render-Based tracker with a RealSense camera",
111 baseArgs.registerArguments(parser);
113 logger.registerArguments(parser);
114 plotter.registerArguments(parser);
118 baseArgs.postProcessArguments();
119 plotter.postProcessArguments(baseArgs.display);
123 if (baseArgs.enableRenderProfiling) {
124 vpRBTrackerTutorial::enableRendererProfiling();
128 std::cout <<
"Loading tracker: " << baseArgs.trackerConfiguration << std::endl;
130 tracker.loadConfigurationFile(baseArgs.trackerConfiguration);
131 if (!baseArgs.modelPath.empty()) {
132 tracker.setModelPath(baseArgs.modelPath);
138 const unsigned int width = realsenseArgs.width,
height = realsenseArgs.height;
139 const unsigned fps = realsenseArgs.fps;
142 std::cout <<
"Opening realsense with settings: " <<
width <<
"x" <<
height <<
" @ " <<
fps <<
"fps" << std::endl;
144 config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
145 config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);
146 rs2::align align_to(RS2_STREAM_COLOR);
148 realsense.
open(config);
151 std::cout <<
"Caught an exception: " <<
e.what() << std::endl;
152 std::cout <<
"Check if the Realsense camera is connected..." << std::endl;
160 tracker.setCameraParameters(cam, height, width);
178 for (
int i = 0;
i < 10; ++
i) {
185 std::cout <<
"Creating displays..." << std::endl;
186 std::vector<std::shared_ptr<vpDisplay>> displays, displaysDebug;
189 if (baseArgs.display) {
190 displays = vpRBTrackerTutorial::createDisplays(Id, Icol, IdepthDisplay, IProbaDisplay);
191 if (baseArgs.debugDisplay) {
195 "Normals in object frame", InormDisplay,
196 "Depth canny", cannyDisplay
204 if (baseArgs.display && !baseArgs.hasInlineInit()) {
207 realsense.
acquire((
unsigned char *)Icol.bitmap, (
unsigned char *)depthRaw.
bitmap,
nullptr,
nullptr, &align_to);
208 updateDepth(depthRaw, depthScale, baseArgs.maxDepthDisplay, depth, IdepthDisplay);
227 std::cout <<
"Starting init" << std::endl;
228 if (baseArgs.hasInlineInit()) {
229 tracker.setPose(baseArgs.cMoInit);
231 else if (baseArgs.display) {
232 tracker.initClick(Id, baseArgs.initFile,
true);
239 std::cout <<
"Starting pose: " <<
vpPoseVector(cMo).
t() << std::endl;
242 if (baseArgs.display) {
248 unsigned int iter = 1;
255 realsense.
acquire((
unsigned char *)Icol.bitmap, (
unsigned char *)depthRaw.
bitmap,
nullptr,
nullptr, &align_to);
256 updateDepth(depthRaw, depthScale, baseArgs.maxDepthDisplay, depth, IdepthDisplay);
268 case vpRBTrackingStoppingReason::EXCEPTION:
270 std::cout <<
"Encountered an exception during tracking, pose was not updated!" << std::endl;
273 case vpRBTrackingStoppingReason::NOT_ENOUGH_FEATURES:
275 std::cout <<
"There were not enough feature to perform tracking!" << std::endl;
278 case vpRBTrackingStoppingReason::OBJECT_NOT_IN_IMAGE:
280 std::cout <<
"Object is not in image!" << std::endl;
283 case vpRBTrackingStoppingReason::CONVERGENCE_CRITERION:
285 std::cout <<
"Convergence criterion reached:" << std::endl;
286 std::cout <<
"- Num iterations: " << result.
getNumIterations() << std::endl;
290 case vpRBTrackingStoppingReason::MAX_ITERS:
299 const std::shared_ptr<vpRBDriftDetector> driftDetector =
tracker.getDriftDetector();
301 if (driftDetector->getScore() < 0.25) {
302 std::cout <<
"Drift detection has low confidence score: " << driftDetector->getScore() << std::endl;
311 if (baseArgs.display) {
312 if (baseArgs.debugDisplay) {
319 tracker.display(Id, Icol, IdepthDisplay);
324 std::stringstream ss;
325 ss <<
"Confidence score: " << std::setprecision(2) << driftDetector->getScore() << std::endl;
334 tracker.displayMask(IProbaDisplay);
346 logger.logFrame(tracker, iter, Id, Icol, IdepthDisplay, IProbaDisplay);
347 std::cout <<
"Iter " <<
iter <<
": " << round(frameEnd - frameStart) <<
"ms" << std::endl;
348 std::cout <<
"- Tracking: " << round(trackingEnd - trackingStart) <<
"ms" << std::endl;
349 std::cout <<
"- Display: " << round(displayEnd - displayStart) <<
"ms" << std::endl;
350 if (baseArgs.verbose) {
351 std::cout << result.
timer() << std::endl;
353 plotter.plot(tracker, (frameEnd - expStart) / 1000.0);
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
static const vpColor none
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
Error that can be emitted by ViSP classes.
@ notImplementedError
Not implemented.
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Definition of the vpImage class member functions.
unsigned int getWidth() const
unsigned int getSize() const
Type * bitmap
Points to the image bitmap data.
unsigned int getHeight() const
Command line argument parsing with support for JSON files. If a JSON file is supplied,...
static vpPanda3DFrameworkManager & getInstance()
Implementation of a pose vector and operations on poses.
Class implementing the Render-Based Tracker (RBT).
vpRBTrackingStoppingReason getStoppingReason() const
vpRBTrackingTimings & timer()
const std::vector< double > & getConvergenceMetricValues() const
unsigned int getNumIterations() const
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
std::vector< std::shared_ptr< vpDisplay > > makeDisplayGrid(unsigned int rows, unsigned int cols, unsigned int startX, unsigned int startY, unsigned int paddingX, unsigned int paddingY, Args &... args)
Create a grid of displays, given a set of images. All the displays will be initialized in the correct...
VISP_EXPORT double measureTimeMs()
VISP_EXPORT int wait(double t0, double t)
void registerArguments(vpJsonArgumentParser &parser)
vpImage< unsigned char > isSilhouette
Image containing the orientation of the gradients.
vpImage< float > silhouetteCanny