#include <visp3/core/vpConfig.h>

#include <cstdlib>

#include <visp3/core/vpIoTools.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#if defined(VISP_HAVE_MINIZ) && defined(VISP_HAVE_WORKING_REGEX)
#ifdef ENABLE_VISP_NAMESPACE
#endif
namespace
{
{
std::vector<double> vec { t[0], t[1], t[2], tu[0], tu[1], tu[2] };
return vec;
}
}
int main(int argc, char **argv)
{
  // Command-line options: PNG encoding backend, output archive name,
  // grayscale vs. color (optionally with alpha), and pose printing.
  bool opencv_backend = false;
  std::string npz_filename = "npz_tracking_teabox.npz";
  bool color_mode = false;
  bool save_alpha = false;
  bool print_cMo = false;

  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--cv-backend") {
      opencv_backend = true;
    }
    else if ((std::string(argv[i]) == "--save" || std::string(argv[i]) == "-o") && (i+1 < argc)) {
      npz_filename = argv[i+1];
    }
    else if (std::string(argv[i]) == "--color" || std::string(argv[i]) == "-c") {
      color_mode = true;
    }
    else if (std::string(argv[i]) == "--alpha" || std::string(argv[i]) == "-a") {
      save_alpha = true;
    }
    else if (std::string(argv[i]) == "--print-cMo") {
      print_cMo = true;
    }
    else {
      // Any unrecognized argument prints the usage and exits.
      std::cout << "Options:" << std::endl;
      std::cout << " --cv-backend use OpenCV if available for in-memory PNG encoding" << std::endl;
      std::cout << " --save / -o output filename" << std::endl;
      std::cout << " --color save RGB data" << std::endl;
      // Fixed typo in help text: "opton" -> "option".
      std::cout << " --alpha if --color option, save RGBa data" << std::endl;
      std::cout << " --print-cMo print cMo" << std::endl;
      return EXIT_SUCCESS;
    }
  }

  std::cout << "Save file: " << npz_filename << std::endl;
  std::cout << "OpenCV backend? " << opencv_backend << std::endl;
  std::cout << "Color image? " << color_mode << std::endl;
  std::cout << "Save alpha channel? " << save_alpha << std::endl;

  std::string opt_videoname = "model/teabox/teabox.mp4";
  std::string opt_modelname = "model/teabox/teabox.cao";

  // NOTE(review): this listing appears extracted from documentation. The
  // declarations of parentname/objectname/tracker/cam/cMo/I/iter, the video
  // reader setup and the tracking calls are missing from this view.
  if (!parentname.empty())
    objectname = parentname + "/" + objectname;

  std::cout << "Video name: " << opt_videoname << std::endl;

  // Camera intrinsics: perspective projection without distortion
  // (px, py, u0, v0) hard-coded for this teabox sequence.
  cam.initPersProjWithoutDistortion(839, 839, 325, 243);
  tracker.loadModel(objectname + ".cao");

  // Initial pose of the teabox w.r.t. the camera, hard-coded for the first
  // frame of this sequence (rotation block + translation column).
  cMo[0][0] = 0.4889237963;
  cMo[0][1] = 0.8664706489;
  cMo[0][2] = 0.1009065709;
  cMo[0][3] = -0.07010159786;
  cMo[1][0] = 0.4218451176;
  cMo[1][1] = -0.1335995053;
  cMo[1][2] = -0.8967708007;
  cMo[1][3] = -0.08363026223;
  cMo[2][0] = -0.7635445096;
  cMo[2][1] = 0.4810195286;
  cMo[2][2] = -0.4308363901;
  cMo[2][3] = 0.4510066725;

  const int height = I.getRows();
  const int width = I.getCols();
  // 1 channel for grayscale, 3 for RGB, 4 for RGBa.
  int channel = 1;
  if (color_mode) {
    channel = save_alpha ? 4 : 3;
  }

  std::vector<unsigned char> img_buffer;

  // Create the npz archive ("w" truncates) and store the camera name first;
  // subsequent arrays are appended with mode "a".
  visp::cnpy::npz_save(npz_filename, "camera_name", &vec_camera_name[0], { vec_camera_name.size() }, "w");

  const double cam_px = cam.get_px(), cam_py = cam.get_py(), cam_u0 = cam.get_u0(), cam_v0 = cam.get_v0();

  // Per-frame accumulators: 6-DoF poses, encoded-image sizes and bytes,
  // and the encoding times in ms.
  std::vector<double> vec_poses;
  std::vector<int> vec_img_data_size;
  std::vector<unsigned char> vec_img_data;
  std::vector<double> times;

  if (print_cMo) {
    std::cout << "\ncMo:\n" << cMo << std::endl;
  }

  // NOTE(review): the in-memory PNG encoding code that fills img_buffer and
  // measures start/end is missing from this listing (the branches are empty).
  if (color_mode) {
  }
  else {
  }
  times.push_back(end-start);
  vec_img_data_size.push_back(static_cast<int>(img_buffer.size()));
  vec_img_data.insert(vec_img_data.end(), img_buffer.begin(), img_buffer.end());

  // Append the current pose as a flat 6-vector [t, theta-u].
  std::vector<double> vec_pose = poseToVec(cMo);
  vec_poses.insert(vec_poses.end(), vec_pose.begin(), vec_pose.end());

  // Retrieve the CAD-model segments to display for the current pose,
  // keyed by camera name.
  std::map<std::string, std::vector<std::vector<double> > > mapOfModels;
  std::map<std::string, unsigned int> mapOfW;
  std::map<std::string, unsigned int> mapOfH;
  std::map<std::string, vpHomogeneousMatrix> mapOfcMos;
  std::map<std::string, vpCameraParameters> mapOfCams;
  tracker.getModelForDisplay(mapOfModels, mapOfW, mapOfH, mapOfcMos, mapOfCams);

  std::vector<std::vector<double>> model = mapOfModels[camera_name];
  const std::string model_iter_sz = model_iter + "_sz";
  const size_t model_size = model.size();

  // One npz entry per model segment, named "model_<frame>_<segment>".
  for (size_t i = 0; i < model.size(); i++) {
    char buffer[100];
    int res = snprintf(buffer, 100, "model_%06zu_%06zu", iter, i);
    if (res > 0 && res < 100) {
      const std::string model_iter_data = buffer;
      std::vector<double> &vec_line = model[i];
    }
  }
  // NOTE(review): this closing brace matched the per-frame acquisition loop,
  // whose opening statement is missing from this listing.
  }

  // Fixed truncated statement: the original listing was cut after
  // "Median time: "; completed with getMedian/getStdev, which this tutorial
  // already references.
  std::cout << "Mean time for image encoding: " << vpMath::getMean(times) << " ms ; Median time: "
    << vpMath::getMedian(times) << " ms ; Std: " << vpMath::getStdev(times) << " ms" << std::endl;

  visp::cnpy::npz_save(npz_filename, "vec_img_data_size", vec_img_data_size.data(), { vec_img_data_size.size() }, "a");

  // Poses are stored as an (nb_frames, 6) array: one [t, theta-u] row per frame.
  assert(iter == vec_poses.size()/6);
  visp::cnpy::npz_save(npz_filename, "vec_poses", vec_poses.data(), { static_cast<size_t>(iter), 6 }, "a");

  return EXIT_SUCCESS;
}
#else
#include <iostream>
int main()
{
  // Fallback build: this tutorial requires miniz (npz I/O) and a working
  // std::regex implementation, which in ViSP implies C++11 compiler flags.
  std::cout << "This tutorial needs c++11 flags" << std::endl;
#ifndef VISP_HAVE_MINIZ
  std::cerr << "You also need to enable npz I/O functions" << std::endl;
#endif
  // Explicit status, for consistency with the fully-featured build's main().
  return EXIT_SUCCESS;
}
#endif
Generic class defining intrinsic camera parameters.
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
vpImageIoBackendType
Image IO backend for only jpeg and png formats image loading and saving.
@ IO_STB_IMAGE_BACKEND
Use embedded stb_image library.
@ IO_OPENCV_BACKEND
Use OpenCV imgcodecs module.
static void writePNGtoMem(const vpImage< unsigned char > &I, std::vector< unsigned char > &buffer, int backend=IO_DEFAULT_BACKEND)
Definition of the vpImage class member functions.
static double getMedian(const std::vector< double > &v)
static double getStdev(const std::vector< double > &v, bool useBesselCorrection=false)
static double getMean(const std::vector< double > &v)
Real-time 6D object pose tracking using its CAD model.
Implementation of a rotation vector as axis-angle minimal representation.
Class that considers the case of a translation vector.
Class that enables to manipulate easily a video file or a sequence of images. As it inherits from the...
void open(vpImage< vpRGBa > &I) VP_OVERRIDE
void setFileName(const std::string &filename)
void acquire(vpImage< vpRGBa > &I) VP_OVERRIDE
VISP_EXPORT void npz_save(const std::string &zipname, std::string fname, const std::vector< std::string > &data_vec, const std::vector< size_t > &shape, const std::string &mode="w")
VISP_EXPORT double measureTimeMs()