[draft] Post Camera Synchronization #104

Draft · wants to merge 6 commits into base: main
4 changes: 2 additions & 2 deletions modules/docker-compose.perception.yaml
@@ -29,11 +29,11 @@ services:
- driver: nvidia
count: 1
capabilities: [ gpu ]
-    command: /bin/bash -c "ros2 launch camera_object_detection eve_launch.py"
+    command: /bin/bash -c "ros2 launch camera_object_detection eve.launch.py"
volumes:
- /mnt/wato-drive2/perception_models/yolov8m.pt:/perception_models/yolov8m.pt
- /mnt/wato-drive2/perception_models/traffic_light.pt:/perception_models/traffic_light.pt
-      - /mnt/wato-drive2/perception_models/traffic_signs_v0.pt:/perception_models/traffic_signs_v1.pt
+      - /mnt/wato-drive2/perception_models/traffic_signs_v3.pt:/perception_models/traffic_signs.pt

lidar_object_detection:
build:
172 changes: 172 additions & 0 deletions src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
@@ -0,0 +1,172 @@
import rclpy
from rclpy.node import Node
from message_filters import Subscriber, ApproximateTimeSynchronizer

from sensor_msgs.msg import Image
from vision_msgs.msg import Detection2DArray, Detection2D, ObjectHypothesisWithPose
from std_msgs.msg import Header

from ultralytics.utils.plotting import Annotator, colors

from cv_bridge import CvBridge, CvBridgeError

class CameraSyncNode(Node):  # synchronizes visualizations
    def __init__(self):
        super().__init__('camera_sync_node')
        self.get_logger().info("Camera Sync Node")

self.declare_parameter("camera_img_topic", "/camera/right/image_color")
self.declare_parameter("camera_detection_topic", "/camera/right/camera_detections")
self.declare_parameter("traffic_signs_topic", "/traffic_signs/right")
self.declare_parameter("traffic_lights_topic", "/traffic_lights/right")
self.declare_parameter("combined_detection_publisher", "/camera/right/combined_detections")
self.declare_parameter("publish_vis_topic", "/camera/right/annotated_img")

self.camera_img_topic = self.get_parameter("camera_img_topic").value
self.camera_detection_topic = self.get_parameter("camera_detection_topic").value
self.traffic_signs_topic = self.get_parameter("traffic_signs_topic").value
self.traffic_lights_topic = self.get_parameter("traffic_lights_topic").value
self.combined_detection_topic = self.get_parameter("combined_detection_publisher").value
self.publish_vis_topic = self.get_parameter("publish_vis_topic").value

self.camera_img_sub = Subscriber(self, Image, self.camera_img_topic)
self.camera_detection_sub = Subscriber(self, Detection2DArray, self.camera_detection_topic)
self.traffic_signs_sub = Subscriber(self, Detection2DArray, self.traffic_signs_topic)
self.traffic_lights_sub = Subscriber(self, Detection2DArray, self.traffic_lights_topic)

self.cv_bridge = CvBridge()
self.line_thickness = 1
self.names = []

self.ts = ApproximateTimeSynchronizer(
[self.camera_img_sub,
self.camera_detection_sub,
self.traffic_signs_sub,

             self.traffic_lights_sub],
            queue_size=10,
            slop=0.1)

        self.ts.registerCallback(self.callback)

        self.combined_detection_publisher = self.create_publisher(Detection2DArray, self.combined_detection_topic, 10)
        self.vis_publisher = self.create_publisher(Image, self.publish_vis_topic, 10)

    def process_img(self, image):
        try:
            cv_image = self.cv_bridge.imgmsg_to_cv2(image, desired_encoding="passthrough")
        except CvBridgeError as e:
            self.get_logger().error(str(e))
            return
        return cv_image

    def postprocess_detections(self, detections, annotator):
        """
        Post-process draws bounding boxes on the camera image.

        Parameters:
            detections: A list of dict with the format
                {
                    "label": str,
                    "bbox": [float],
                    "conf": float
                }
            annotator: An ultralytics.yolo.utils.plotting.Annotator for the current image

        Returns:
            processed_detections: filtered detections
            annotator_img: image with bounding boxes drawn on
        """
        processed_detections = detections

        for det in detections:
            label = f'{det["label"]} {det["conf"]:.2f}'
            x1, y1, w1, h1 = det["bbox"]
            xyxy = [x1, y1, x1 + w1, y1 + h1]
            annotator.box_label(xyxy, label, color=colors(1, True))

        annotator_img = annotator.result()
        return (processed_detections, annotator_img)

    def publish_detections(self, detections, msg, feed):
        # Publish detections as a Detection2DArray message
        detection2darray = Detection2DArray()

        # fill header for detection list
        detection2darray.header.stamp = msg.header.stamp
        detection2darray.header.frame_id = msg.header.frame_id
        # populate detection list
        if detections is not None and len(detections):
            for detection in detections:
                detection2d = Detection2D()
                detection2d.header.stamp = self.get_clock().now().to_msg()
                detection2d.header.frame_id = msg.header.frame_id
                detected_object = ObjectHypothesisWithPose()
                detected_object.hypothesis.class_id = detection["label"]
                detected_object.hypothesis.score = detection["conf"]
                detection2d.results.append(detected_object)
                detection2d.bbox.center.position.x = detection["bbox"][0]
                detection2d.bbox.center.position.y = detection["bbox"][1]
                detection2d.bbox.size_x = detection["bbox"][2]
                detection2d.bbox.size_y = detection["bbox"][3]

                # append detection to detection list
                detection2darray.detections.append(detection2d)

        self.combined_detection_publisher.publish(detection2darray)

    def publish_vis(self, annotated_img, msg):
        # Publish visualizations
        imgmsg = self.cv_bridge.cv2_to_imgmsg(annotated_img, "bgr8")
        imgmsg.header.stamp = msg.header.stamp
        imgmsg.header.frame_id = msg.header.frame_id
        self.get_logger().info("Publish img")
        self.vis_publisher.publish(imgmsg)

    def callback(
        self,
        camera_img_sub,
        camera_detection_sub,
        traffic_signs_sub,
        traffic_lights_sub
    ):
        cv_image = self.process_img(camera_img_sub)

        combined_detections = camera_detection_sub.detections + traffic_lights_sub.detections + traffic_signs_sub.detections

        annotator = Annotator(
            cv_image,
            line_width=self.line_thickness,
            example=str(self.names),
        )

        detections = []
        for det in combined_detections:
            obj_with_pose = det.results[0]
            detections.append(
                {
                    "label": obj_with_pose.hypothesis.class_id,
                    "conf": obj_with_pose.hypothesis.score,
                    "bbox": [
                        det.bbox.center.position.x,
                        det.bbox.center.position.y,
                        det.bbox.size_x,
                        det.bbox.size_y
                    ],
                }
            )
        (combined_detections, annotated_img) = self.postprocess_detections(detections, annotator)

        self.publish_detections(combined_detections, camera_detection_sub, "")
        self.publish_vis(annotated_img, camera_img_sub)

def main(args=None):
    rclpy.init(args=args)
    node = CameraSyncNode()

    rclpy.spin(node)

    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
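For the synchronizer above to fire, the camera image and all three Detection2DArray streams must carry header stamps that fall within the 0.1 s slop window. A minimal sketch of that timing contract, assuming the upstream detectors stamp their detections from the incoming image (the function name below is illustrative, not part of this PR):

# Sketch only: upstream detectors should reuse the image header when publishing;
# otherwise ApproximateTimeSynchronizer (queue_size=10, slop=0.1) never groups
# the image with the detection, traffic-sign, and traffic-light messages.
from sensor_msgs.msg import Image
from vision_msgs.msg import Detection2DArray


def make_stamped_detections(img_msg: Image) -> Detection2DArray:
    detections_msg = Detection2DArray()
    detections_msg.header.stamp = img_msg.header.stamp      # same stamp as the image
    detections_msg.header.frame_id = img_msg.header.frame_id
    # ... fill detections_msg.detections from the model output ...
    return detections_msg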
@@ -281,16 +281,16 @@ def image_callback(self, msg):
            )
            self.get_logger().debug(f"{label}: {bbox}")

-        annotator = Annotator(
-            cv_image,
-            line_width=self.line_thickness,
-            example=str(self.names),
-        )
-        (detections, annotated_img) = self.postprocess_detections(detections, annotator)
+        # annotator = Annotator(
+        #     cv_image,
+        #     line_width=self.line_thickness,
+        #     example=str(self.names),
+        # )
+        # (detections, annotated_img) = self.postprocess_detections(detections, annotator)

        # Currently we support a single camera so we pass an empty string
        feed = ""
-        self.publish_vis(annotated_img, msg, feed)
+        # self.publish_vis(annotated_img, msg, feed)
        self.publish_detections(detections, msg, feed)

        if self.save_detections:
26 changes: 26 additions & 0 deletions src/perception/camera_object_detection/config/post_synchronizer_config.yaml
@@ -0,0 +1,26 @@
left_post_synchronizer_node:
  ros__parameters:
    camera_img_topic: /camera/left/image_color
    camera_detection_topic: /camera/left/camera_detections
    traffic_signs_topic: /traffic_signs/left
    traffic_lights_topic: /traffic_lights/left
    combined_detection_publisher: /camera/left/combined_detections
    publish_vis_topic: /camera/left/annotated_img

center_post_synchronizer_node:
  ros__parameters:
    camera_img_topic: /camera/center/image_color
    camera_detection_topic: /camera/center/camera_detections
    traffic_signs_topic: /traffic_signs/center
    traffic_lights_topic: /traffic_lights/center
    combined_detection_publisher: /camera/center/combined_detections
    publish_vis_topic: /camera/center/annotated_img

right_post_synchronizer_node:
  ros__parameters:
    camera_img_topic: /camera/right/image_color
    camera_detection_topic: /camera/right/camera_detections
    traffic_signs_topic: /traffic_signs/right
    traffic_lights_topic: /traffic_lights/right
    combined_detection_publisher: /camera/right/combined_detections
    publish_vis_topic: /camera/right/annotated_img
@@ -1,9 +1,31 @@
-traffic_light_node:
+left_traffic_light_node:
   ros__parameters:
     camera_topic: /camera/left/image_color
     publish_vis_topic: /traffic_lights_viz
-    publish_detection_topic: /traffic_lights
+    publish_detection_topic: /traffic_lights/left
     model_path: /perception_models/traffic_light.pt
     crop_mode: CenterCrop
     image_size: 1024
     save_detections: false
+
+center_traffic_light_node:
+  ros__parameters:
+    camera_topic: /camera/center/image_color
+    publish_vis_topic: /traffic_lights_viz
+    publish_detection_topic: /traffic_lights/center
+    model_path: /perception_models/traffic_light.pt
+    crop_mode: CenterCrop
+    image_size: 1024
+    save_detections: false
+
+right_traffic_light_node:
+  ros__parameters:
+    camera_topic: /camera/right/image_color
+    publish_vis_topic: /traffic_lights_viz
+    publish_detection_topic: /traffic_lights/right
+    model_path: /perception_models/traffic_light.pt
+    crop_mode: CenterCrop
+    image_size: 1024
+    save_detections: false
@@ -1,9 +1,29 @@
-traffic_signs_node:
+left_traffic_signs_node:
   ros__parameters:
     camera_topic: /camera/left/image_color
     publish_vis_topic: /traffic_signs_viz
-    publish_detection_topic: /traffic_signs
-    model_path: /perception_models/traffic_signs_v1.pt
+    publish_detection_topic: /traffic_signs/left
+    model_path: /perception_models/traffic_signs.pt
     crop_mode: CenterCrop
     image_size: 1024
     save_detections: false
+
+center_traffic_signs_node:
+  ros__parameters:
+    camera_topic: /camera/center/image_color
+    publish_vis_topic: /traffic_signs_viz
+    publish_detection_topic: /traffic_signs/center
+    model_path: /perception_models/traffic_signs.pt
+    crop_mode: CenterCrop
+    image_size: 1024
+    save_detections: false
+
+right_traffic_signs_node:
+  ros__parameters:
+    camera_topic: /camera/right/image_color
+    publish_vis_topic: /traffic_signs_viz
+    publish_detection_topic: /traffic_signs/right
+    model_path: /perception_models/traffic_signs.pt
+    crop_mode: CenterCrop
+    image_size: 1024
+    save_detections: false
7 changes: 7 additions & 0 deletions src/perception/camera_object_detection/launch/eve.launch.py
@@ -47,11 +47,18 @@ def generate_launch_description():
        condition=LaunchConfigurationEquals("launch_traffic_signs", "True"),
    )

+    post_synchronizer_launch = IncludeLaunchDescription(
+        PythonLaunchDescriptionSource(
+            [camera_object_detection_launch_include_dir, "/post_synchronizer.launch.py"]
+        )
+    )
+
    return LaunchDescription(
        launch_args
        + [
            pretrained_yolov8_launch,
            traffic_light_launch,
            traffic_signs_launch,
+            post_synchronizer_launch,
        ]
    )
41 changes: 41 additions & 0 deletions src/perception/camera_object_detection/launch/post_synchronizer.launch.py
@@ -0,0 +1,41 @@
from launch import LaunchDescription
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory
import os


def generate_launch_description():
    config = os.path.join(
        get_package_share_directory('camera_object_detection'),
        'config',
        'post_synchronizer_config.yaml'
    )

    left_camera_object_detection_node = Node(
        package='camera_object_detection',
        executable='camera_sync_node',
        name='left_post_synchronizer_node',
        parameters=[config]
    )

    center_camera_object_detection_node = Node(
        package='camera_object_detection',
        executable='camera_sync_node',
        name='center_post_synchronizer_node',
        parameters=[config]
    )

    right_camera_object_detection_node = Node(
        package='camera_object_detection',
        executable='camera_sync_node',
        name='right_post_synchronizer_node',
        parameters=[config]
    )

    return LaunchDescription(
        [
            left_camera_object_detection_node,
            center_camera_object_detection_node,
            right_camera_object_detection_node
        ]
    )
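The launch file refers to an executable named camera_sync_node; for that to resolve, the package's setup.py presumably registers a console script along the lines of the sketch below. setup.py is not part of the diff shown here, so the exact entry is an assumption.

# Hypothetical excerpt from camera_object_detection/setup.py (not in this diff):
# registers the camera_sync_node executable used by the three Node actions above.
from setuptools import setup

setup(
    name='camera_object_detection',
    # ... package metadata and data_files omitted ...
    entry_points={
        'console_scripts': [
            'camera_sync_node = camera_object_detection.post_synchronizer:main',
        ],
    },
)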