# NOTE(review): this block is a garbled extraction -- statements wrap
# mid-expression across lines and the fused original line numbers
# (138, 144, 146, ...) have gaps (152-166 etc. are missing), so parts of
# this method body are absent from this view. Code is left byte-identical;
# comments annotate only what the visible lines show.
# Purpose: match/track features of the current frame against the visible
# subset of the persistent object point map.
138 def trackFeatures(self, frame: RBFeatureTrackerInput, previousFrame: RBFeatureTrackerInput, cMo: HomogeneousMatrix):
# Inference only: disable autograd for the matching pass.
144 with torch.no_grad():
# Current image dimensions and the rendered depth map of the object.
146 h, w = frame.I.getRows(), frame.I.getCols()
147 depth_map = frame.renders.depth
# Indices of map points visible from the render pose (statement is split
# across the next two physical lines by the extraction).
149 visible_indices = np.ascontiguousarray(self.
object_map.point_map.getVisiblePoints(h, w, frame.cam, frame.renders.cMo, depth_map))
# Only gather descriptors when at least one map point is visible.
150 if len(visible_indices) > 0:
151 visible_object_descriptors = self.
object_map.descriptors[visible_indices]
# (original lines 152-166 missing here) Depth lookup at a keypoint pixel;
# kp is presumably (u, v) from the matching code in the missing lines --
# TODO confirm against the full source.
167 Z = frame.depth[int(kp[1]), int(kp[0])]
# Keep index i only when the depth test above passed -- the guarding
# condition (orig line 168) is missing from this extraction.
169 valid_indices.append(i)
# Store the matched depths contiguously; self.Zs is consumed later in
# initVVS for the x*Z / y*Z back-projection.
171 self.
Zs = np.ascontiguousarray(self.
Zs)
# NOTE(review): garbled extraction -- statements wrap mid-expression and the
# fused original line numbers (178, 181, 188, ...) have gaps (179-180,
# 182-187, 189, 191 missing), so parts of this method are absent from this
# view. Code left byte-identical; comments annotate only the visible lines.
# Purpose: prepare the virtual visual servoing (VVS) state from the matched
# features before iterative pose refinement.
178 def initVVS(self, frame: RBFeatureTrackerInput, _previousFrame: RBFeatureTrackerInput, _cMo: HomogeneousMatrix):
# Resize the weighted error vector to the feature count; second argument
# False presumably skips re-initialisation of contents (ViSP resize
# convention -- TODO confirm).
181 self.weighted_error.resize(self.
numFeatures,
False)
# Convert matched pixel coordinates (u, v) to normalized meter
# coordinates (x, y) using the camera intrinsics.
188 xoc, yoc = PixelMeterConversion.convertPoints(frame.cam, current_px_matched[:, 0], current_px_matched[:, 1])
# Stack (x, y) into an Nx2 float64 ViSP Matrix view (single statement
# split over the next three physical lines).
190 self.
current_xy_obj = Matrix.view(np.ascontiguousarray(np.concatenate((xoc[...,
None], yoc[...,
None]), axis=-1), dtype=np.float64))
# Back-project to 3D camera-frame coordinates using the depths stored in
# trackFeatures: X = x*Z, Y = y*Z, Z (statement split over several lines).
192 xyz = np.concatenate([arr[:,
None]
for arr
in (xoc * self.
Zs, yoc * self.
Zs, self.
Zs)], axis=-1, dtype=np.float64)
# NOTE(review): garbled extraction -- statements wrap mid-expression and the
# fused original line numbers (200, 215, 216, ...) have gaps (201-214, 217,
# 219, 222-223, 226-233, 235-239 missing), so much of this method is absent
# from this view. Code left byte-identical; comments annotate visible lines.
# Purpose: one iteration of the VVS optimization -- robust weighting of the
# reprojection error and update of the optimizer terms.
200 def computeVVSIter(self, frame: RBFeatureTrackerInput, cMo: HomogeneousMatrix, iteration: int):
# Scale-aware M-estimator threshold from the render bounding-box diagonal,
# expressed in meters via the focal lengths (0.1% of the diagonal).
215 bb = frame.renders.boundingBox
216 rect_diagonal = np.sqrt(bb.getHeight() ** 2 + bb.getWidth() ** 2)
218 threshold = (rect_diagonal / np.sqrt(cam.get_px() ** 2 + cam.get_py() ** 2)) * 0.001
# Alternative threshold: 0.1% of the rendered depth range. The conditional
# selecting between the two assignments (orig lines 217/219) is missing
# from this extraction -- presumably it branches on self.use_3d; TODO
# confirm, otherwise the second assignment would always win.
220 diff = frame.renders.zFar - frame.renders.zNear
221 threshold = diff * 0.001
# Feed the threshold to the robust estimator as the minimum MAD.
224 self.
robust.setMinMedianAbsoluteDeviation(threshold)
# Per-point residual norm; residuals are 2D (x, y) or 3D (x, y, z)
# depending on self.use_3d (statement split over several lines).
225 error_per_point = np.linalg.norm(self.
error.numpy().reshape((-1, 2
if not self.
use_3d else 3)), axis=-1)
# Rebuild interaction matrix / error terms for the current pose estimate.
234 self.updateOptimizerTerms(cMo)
# Project the matched map points at pose cMo into camera coords (cX),
# normalized coords (xs) and pixels (uvs); outputs are filled in place.
240 cX, xs, uvs = Matrix(), Matrix(), Matrix()
241 map_indices = ArrayInt2D.view(np.ascontiguousarray(self.
idx_object_map_matched[:,
None]).astype(np.int32))
# NOTE(review): `project(self.\nframe.cam, ...)` reads as self.frame.cam
# after unwrapping, which is suspicious next to the `frame` parameter --
# likely a wrapping artifact of the extraction; verify in the full source.
242 self.
object_map.point_map.project(self.
frame.cam, map_indices, cMo, cX, xs, uvs)
# NOTE(review): garbled extraction -- statements wrap mid-expression and the
# fused original line numbers (259, 264, 266, ...) have gaps (260-263, 265,
# 274, 277, 279, 282-283, 285 missing), so parts of this method are absent
# from this view (ps, uvs and c are defined in the missing lines). Code left
# byte-identical; comments annotate only the visible lines.
# Purpose: overlay tracked points (and optionally reprojections / robust
# weights) on the RGB display image.
259 def display(self, cam: CameraParameters, I: ImageGray, IRGB: ImageRGBa, I_depth: ImageGray):
# Per-point reprojection error between projected (uvs) and observed (ps)
# pixels; both arrays come from the missing lines above -- TODO confirm.
264 error = np.linalg.norm(uvs - ps, axis=1)
# Color-scale ceiling: at least the map's outlier rejection threshold, so
# small errors don't saturate the error colormap.
266 max_error_display = np.maximum(np.max(error), self.
object_map.point_map.getOutlierReprojectionErrorThreshold())
# Round to integer pixel coordinates for drawing.
267 ps = np.rint(ps).astype(np.int32)
268 uvs = np.rint(uvs).astype(np.int32)
# SIMPLE: draw only the observed points, in green. Note the (row, col)
# arrays are passed as (ps[:, 1], ps[:, 0]) -- ViSP display order.
269 if self.
display_type == RBXFeatFeatureTracker.DisplayType.SIMPLE:
270 Display.displayCrosses(IRGB, ps[:, 1], ps[:, 0], 8, Color.green, 1)
# SIMPLE_MODEL_AND_PROJ: observed points in green plus the model
# reprojections in red.
271 elif self.
display_type == RBXFeatFeatureTracker.DisplayType.SIMPLE_MODEL_AND_PROJ:
272 Display.displayCrosses(IRGB, ps[:, 1], ps[:, 0], 8, Color.green, 1)
273 Display.displayCrosses(IRGB, uvs[:, 1], np.rint(uvs[:, 0]).astype(np.int32), 8, Color.red, 1)
# WEIGHT_AND_ERROR: color each point by its robust weight and its
# normalized reprojection error (weights averaged over the 2 residual
# components per point).
275 elif self.
display_type == RBXFeatFeatureTracker.DisplayType.WEIGHT_AND_ERROR:
276 weights = (np.sum(self.weights.numpy().reshape((len(error), 2)), axis=-1) / 2.0)
278 for p
in range(len(error)):
# Error and weight mapped to 0-255 channels; the surrounding color
# construction (orig line 279) is missing -- c is presumably built
# from these two values, TODO confirm.
280 np.rint((error[p] / max_error_display) * 255).astype(np.uint8),
281 np.rint(weights[p] * 255.0).astype(np.uint8),
284 Display.displayCircleStatic(IRGB, ps[p, 1], ps[p, 0], 2, c,
True, 1)
# Any other display type is unsupported.
286 raise RuntimeError(
'Display time not implemented')