diff --git a/butia_image2world/scripts/butia_image2world/image2world/image2world.py b/butia_image2world/scripts/butia_image2world/image2world/image2world.py
index 85354ea..36700dc 100755
--- a/butia_image2world/scripts/butia_image2world/image2world/image2world.py
+++ b/butia_image2world/scripts/butia_image2world/image2world/image2world.py
@@ -137,8 +137,8 @@ def __detectionDescriptionProcessing(self, data, description2d, header):
w, h, _ = array_point_cloud.shape
bbox_limits[0] = bbox_limits[0] if bbox_limits[0] > 0 else 0
- bbox_limits[1] = bbox_limits[1] if bbox_limits[1] > 0 else 0
- bbox_limits[2] = bbox_limits[2] if bbox_limits[2] < w else w-1
+ bbox_limits[1] = bbox_limits[1] if bbox_limits[1] < w else w-1
+ bbox_limits[2] = bbox_limits[2] if bbox_limits[2] > 0 else 0
bbox_limits[3] = bbox_limits[3] if bbox_limits[3] < h else h-1
desc_point_cloud = array_point_cloud[bbox_limits[2]:bbox_limits[3], bbox_limits[0]:bbox_limits[1], :]
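
This hunk fixes two swapped clamp conditions. With `bbox_limits` ordered `[x_min, x_max, y_min, y_max]` (see the slice on the line above), index 1 must be capped at the image width and index 2 floored at zero, not the reverse as before. Note also that this hunk's context unpacks `w, h, _ = array_point_cloud.shape`, the opposite order of the `h, w = image_depth.shape` used in the next hunk; if the cloud array is stored height-first, the two names are swapped labels for the same bounds. A minimal standalone sketch of the corrected clamping (the function name and `np.clip` formulation are illustrative, not the code under review):

```python
import numpy as np

def clamp_bbox(bbox_limits, w, h):
    """Clamp [x_min, x_max, y_min, y_max] to a w-by-h image, mirroring the fix."""
    lower = np.zeros(4, dtype=int)
    upper = np.array([w - 1, w - 1, h - 1, h - 1])  # x limits vs. width, y limits vs. height
    return np.clip(np.asarray(bbox_limits, dtype=int), lower, upper)

# An x_max past the right edge and a negative y_min are pulled back in bounds:
print(clamp_bbox([-3, 700, -5, 480], w=640, h=480))  # -> [  0 639   0 479]
```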
@@ -215,22 +215,21 @@ def __detectionDescriptionProcessing(self, data, description2d, header):
h, w = image_depth.shape
bbox_limits[0] = bbox_limits[0] if bbox_limits[0] > 0 else 0
- bbox_limits[1] = bbox_limits[1] if bbox_limits[1] > 0 else 0
- bbox_limits[2] = bbox_limits[2] if bbox_limits[2] < w else w-1
+ bbox_limits[1] = bbox_limits[1] if bbox_limits[1] < w else w-1
+ bbox_limits[2] = bbox_limits[2] if bbox_limits[2] > 0 else 0
bbox_limits[3] = bbox_limits[3] if bbox_limits[3] < h else h-1
- self.__mountLutTable(camera_info)
+ self.__mountLutTable(camera_info)
center_depth = image_depth[int(center_y), int(center_x)]
- if center_depth == 0:
+ if center_depth <= 0:
rospy.logwarn('INVALID DEPTH VALUE')
center_depth/= 1000.
limits = np.asarray([(bbox_limits[0], bbox_limits[2]), (bbox_limits[1], bbox_limits[3])])
-
vertices_3d = np.zeros((len(limits), 3))
vertices_3d[:, :2] = self.lut_table[limits[:, 1], limits[:, 0], :]*center_depth
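
Besides mirroring the clamp fix, this hunk widens the invalid-depth guard from `== 0` to `<= 0`, catching negative sentinel values as well. The two box corners are then back-projected through a lookup table: `self.lut_table[v, u]` evidently stores the normalized ray for each pixel, so multiplying by the metric center depth yields camera-frame X and Y. `__mountLutTable` itself is not shown in this diff; a sketch of such a table under an assumed pinhole model:

```python
import numpy as np

def mount_lut_table(width, height, fx, fy, cx, cy):
    """Per-pixel (x/z, y/z) rays for a pinhole camera; scale by depth to get X, Y."""
    u, v = np.meshgrid(np.arange(width), np.arange(height))  # u: columns, v: rows
    lut = np.empty((height, width, 2))
    lut[:, :, 0] = (u - cx) / fx
    lut[:, :, 1] = (v - cy) / fy
    return lut

lut = mount_lut_table(640, 480, fx=525.0, fy=525.0, cx=319.5, cy=239.5)
x, y = lut[240, 400] * 1.2  # X, Y of pixel (u=400, v=240) at 1.2 m depth
```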
@@ -267,7 +266,24 @@ def __detectionDescriptionProcessing(self, data, description2d, header):
min_bound = np.min(vertices_3d, axis=0)
max_bound = np.max(vertices_3d, axis=0)
- box = o3d.geometry.AxisAlignedBoundingBox(min_bound, max_bound)
+ if self.fit_bbox:
+ box_depths = image_depth[limits[0, 1]:limits[1, 1], limits[0, 0]:limits[1, 0]]/1000.
+ box_lut = self.lut_table[limits[0, 1]:limits[1, 1], limits[0, 0]:limits[1, 0], :]
+
+ box_points = np.zeros((box_depths.size, 3))
+
+ box_points[:, 0] = box_lut[:, :, 0].flatten()*box_depths.flatten()
+ box_points[:, 1] = box_lut[:, :, 1].flatten()*box_depths.flatten()
+ box_points[:, 2] = box_depths.flatten()
+
+ box_points = box_points[box_points[:, 2] > 0]
+ box_points = box_points[np.logical_and(np.all(box_points > min_bound, axis=1),
+ np.all(box_points < max_bound, axis=1))]
+
+ box = o3d.geometry.AxisAlignedBoundingBox.create_from_points(o3d.utility.Vector3dVector(box_points))
+
+ else:
+ box = o3d.geometry.AxisAlignedBoundingBox(min_bound, max_bound)
#TODO:
'''
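
With `fit_bbox` enabled, every pixel inside the 2-D box is back-projected with the same LUT, zero-depth points are dropped, and points outside the corner-derived bounds are rejected before Open3D fits a tight axis-aligned box; with it disabled, the box is still built directly from the two projected corners. (`create_from_points` is a static method, so calling it on the class rather than on a throwaway instance is the idiomatic form.) A self-contained comparison of the two constructions on synthetic data, where the filtering stands in for the depth checks above:

```python
import numpy as np
import open3d as o3d

rng = np.random.default_rng(0)
surface = rng.uniform(0.4, 0.6, size=(500, 3))     # points on the detected object
noisy = np.vstack([surface, [[0.0, 0.0, 0.0]]])    # plus one zero-depth reading

# Corner-style box (fit_bbox=False): the outlier inflates it.
coarse = o3d.geometry.AxisAlignedBoundingBox(noisy.min(axis=0), noisy.max(axis=0))

# Point-fitted box (fit_bbox=True): drop invalid depths, then fit.
valid = noisy[noisy[:, 2] > 0]
fitted = o3d.geometry.AxisAlignedBoundingBox.create_from_points(
    o3d.utility.Vector3dVector(valid))

print(coarse.get_extent(), fitted.get_extent())    # the fitted box is far tighter
```

One caution: if every pixel in the patch has invalid depth, `box_points` comes back empty; it may be worth guarding that case before calling `create_from_points`.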
@@ -304,9 +320,11 @@ def __poseDescriptionProcessing(self, data, description2d : Description2D, heade
return description3d
- def __createDescription3D(self, source_data, description2d, header):
+ def __createDescription3D(self, source_data, description2d : Description2D, header):
if description2d.type in self.DESCRIPTION_PROCESSING_ALGORITHMS:
- return self.DESCRIPTION_PROCESSING_ALGORITHMS[description2d.type](source_data, description2d, header)
+ description3D : Description3D = self.DESCRIPTION_PROCESSING_ALGORITHMS[description2d.type](source_data, description2d, header)
+ description3D.bbox2D = description2d.bbox
+ return description3D
else:
return None
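
The dispatch through `DESCRIPTION_PROCESSING_ALGORITHMS` now also carries the 2-D bounding box onto the resulting `Description3D`. One caveat: if a processing function can return `None` (e.g., on an invalid depth reading), the new attribute assignment would raise `AttributeError`. A guarded sketch of the same logic, written as a free function for illustration:

```python
def create_description3d(algorithms, source_data, description2d, header):
    """Dispatch on the 2-D description type; carry its bbox onto the 3-D result."""
    handler = algorithms.get(description2d.type)
    if handler is None:
        return None
    description3d = handler(source_data, description2d, header)
    if description3d is not None:  # a handler may bail out on bad sensor data
        description3d.bbox2D = description2d.bbox
    return description3d
```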
@@ -415,7 +433,8 @@ def __readParameters(self):
self.publish_debug = rospy.get_param('~publish_debug', False)
self.publish_markers = rospy.get_param('~publish_markers', True)
self.color = rospy.get_param('~color', [255, 0, 0])
- self.depth_mean_error = rospy.get_param('~depth_mean_error', 0.017)
+ self.depth_mean_error = rospy.get_param('~depth_mean_error', 0.05)
+ self.fit_bbox = rospy.get_param('~fit_bbox', False)
if __name__ == '__main__':
rospy.init_node('image2world_node', anonymous = True)
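
The default `depth_mean_error` is relaxed from 0.017 m to 0.05 m, and the new `fit_bbox` switch defaults to off, so existing launch files keep the corner-based boxes unless they opt in. A minimal sketch of reading and overriding the private parameters (the rosrun target is hypothetical):

```python
import rospy

# Same pattern as __readParameters; override e.g. with
#   rosrun <package> image2world.py _fit_bbox:=true _depth_mean_error:=0.05
rospy.init_node('image2world_node', anonymous=True)
fit_bbox = rospy.get_param('~fit_bbox', False)                 # opt-in tight boxes
depth_mean_error = rospy.get_param('~depth_mean_error', 0.05)  # meters
```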
diff --git a/butia_vision_launchfiles/yolo_tracker_recognition.launch b/butia_vision_launchfiles/yolo_tracker_recognition.launch
index 21fff06..f3da989 100644
--- a/butia_vision_launchfiles/yolo_tracker_recognition.launch
+++ b/butia_vision_launchfiles/yolo_tracker_recognition.launch
@@ -11,12 +11,10 @@
-
-
-
+
diff --git a/butia_vision_msgs b/butia_vision_msgs
index 8800c69..1737ac7 160000
--- a/butia_vision_msgs
+++ b/butia_vision_msgs
@@ -1 +1 @@
-Subproject commit 8800c69aad4edb701136adcfe29162b38cc9a34e
+Subproject commit 1737ac710f14f512b38a5183d8593005e20b9104