diff --git a/.github/templates/docker_context/docker_context.sh b/.github/templates/docker_context/docker_context.sh index 5e35355f..126b5cb2 100755 --- a/.github/templates/docker_context/docker_context.sh +++ b/.github/templates/docker_context/docker_context.sh @@ -24,6 +24,11 @@ while read -r module; do services=$(docker-compose -f "$module" config --services) module_out=$(echo "$module" | sed -n 's/modules\/docker-compose\.\(.*\)\.yaml/\1/p') + # Skip simulation module + if [[ 'simulation' = $module_out ]]; then + continue + fi + # Only work with modules that are modified if [[ $MODIFIED_MODULES != *$module_out* && $TEST_ALL = "false" ]]; then continue diff --git a/.github/workflows/build_and_unitest.yml b/.github/workflows/build_and_unitest.yml index 12ef538c..214815b1 100644 --- a/.github/workflows/build_and_unitest.yml +++ b/.github/workflows/build_and_unitest.yml @@ -10,7 +10,7 @@ on: jobs: setup-environment: - name: Setup environment + name: Setup Environment runs-on: ubuntu-latest outputs: @@ -29,7 +29,9 @@ jobs: uses: "./.github/templates/check_src_changes" - name: Setup Watod Environment - run: ./watod_scripts/watod-setup-env.sh + run: | + MODULES_DIR="$GITHUB_WORKSPACE/modules" + . 
./watod_scripts/watod-setup-env.sh shell: bash - name: Generate Docker Environment @@ -43,7 +45,7 @@ jobs: uses: "./.github/templates/github_context" build-and-unittest: - name: Build Image and Run Unit Testing Suite + name: Build/Test runs-on: ubuntu-latest needs: setup-environment @@ -124,9 +126,7 @@ jobs: ${{ steps.construct-registry-url.outputs.url }}:${{ env.SOURCE_BRANCH }} cache-from: | ${{ steps.construct-registry-url.outputs.url }}:build_${{ env.SOURCE_BRANCH }} - ${{ steps.construct-registry-url.outputs.url }}:${{ env.SOURCE_BRANCH }} - ${{ steps.construct-registry-url.outputs.url }}:${{ env.TARGET_BRANCH }} - cache-to: type=inline + ${{ steps.construct-registry-url.outputs.url }}:build_${{ env.TARGET_BRANCH }} builder: ${{ steps.buildx.outputs.name }} target: deploy diff --git a/.gitignore b/.gitignore index 9127d484..9be06ca7 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,5 @@ code_server_config/*/start_code_server.sh .env .vscode/ ssh_config/ + +watod-config.local.sh \ No newline at end of file diff --git a/docker/perception/camera_object_detection/camera_object_detection.Dockerfile b/docker/perception/camera_object_detection/camera_object_detection.Dockerfile index da5959bc..5c3ee385 100644 --- a/docker/perception/camera_object_detection/camera_object_detection.Dockerfile +++ b/docker/perception/camera_object_detection/camera_object_detection.Dockerfile @@ -31,7 +31,7 @@ RUN rm requirements.txt # Install Rosdep requirements COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list -RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) +RUN apt-get update && apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) # Copy in source code from source stage WORKDIR ${AMENT_WS} diff --git a/docker/simulation/carla_notebooks/carla_notebooks.Dockerfile b/docker/simulation/carla_notebooks/carla_notebooks.Dockerfile new file mode 100644 index 00000000..2f50b621 --- /dev/null +++ 
b/docker/simulation/carla_notebooks/carla_notebooks.Dockerfile @@ -0,0 +1,23 @@ +ARG CARLA_VERSION=0.9.13 +FROM carlasim/carla:${CARLA_VERSION} AS wato_carla_api + +FROM python:3.8.16-slim-bullseye +ARG CARLA_VERSION=0.9.13 + +RUN pip3 install carla==${CARLA_VERSION} jupyter tensorflow-probability +RUN apt-get update && apt-get install -y curl git wget unzip && apt remove python3-networkx + +COPY --from=wato_carla_api --chown=root /home/carla/PythonAPI/carla/agents /usr/local/lib/python3.8/site-packages/agents + +WORKDIR /home/bolty/carla_notebooks +COPY src/simulation/carla_notebooks /home/bolty/carla_notebooks + +WORKDIR /home/bolty +# Setup CARLA Scenario Runner +# The last sed command replaces hero (default ego vehicle name) with another ego vehicle name +RUN git clone https://github.com/carla-simulator/scenario_runner.git && \ + cd scenario_runner && pip3 install -r requirements.txt && \ + sed -i s/hero/ego/g /home/bolty/scenario_runner/srunner/tools/scenario_parser.py +WORKDIR /home/bolty + +WORKDIR /home/bolty/carla_notebooks \ No newline at end of file diff --git a/docker/simulation/carla_ros_bridge/carla_ros_bridge.Dockerfile b/docker/simulation/carla_ros_bridge/carla_ros_bridge.Dockerfile new file mode 100644 index 00000000..57d952dc --- /dev/null +++ b/docker/simulation/carla_ros_bridge/carla_ros_bridge.Dockerfile @@ -0,0 +1,109 @@ +ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:foxy-ubuntu20.04 + +ARG CARLA_VERSION=0.9.13 +FROM carlasim/carla:${CARLA_VERSION} AS wato_carla_api + +################################ Source ################################ +FROM ${BASE_IMAGE} as source + +WORKDIR ${AMENT_WS}/src + +# Download ROS Bridge +RUN git clone --depth 1 --branch master --recurse-submodules https://github.com/carla-simulator/ros-bridge.git && \ + cd ros-bridge && \ + git checkout e9063d97ff5a724f76adbb1b852dc71da1dcfeec && \ + cd .. 
+ +# Fix an error in the ackermann_control node +RUN sed -i s/simple_pid.PID/simple_pid.pid/g ./ros-bridge/carla_ackermann_control/src/carla_ackermann_control/carla_ackermann_control_node.py + +# Copy in source code +COPY src/simulation/carla_config carla_config +COPY src/wato_msgs/simulation ros_msgs + +# Scan for rosdeps +RUN apt-get -qq update && rosdep update --rosdistro foxy && \ + rosdep install --from-paths . -r -s \ + | grep 'apt-get install' \ + | awk '{print $3}' \ + | sort > /tmp/colcon_install_list + +################################# Dependencies ################################ +FROM ${BASE_IMAGE} as dependencies + +# Install dependencies +RUN apt-get update && \ + apt-fast install -qq -y --no-install-recommends lsb-release \ + libglu1-mesa-dev xorg-dev \ + software-properties-common \ + build-essential \ + python3-rosdep \ + python3-rospkg \ + python3-colcon-common-extensions \ + python3-pygame \ + ros-$ROS_DISTRO-tf2-geometry-msgs \ + ros-$ROS_DISTRO-tf2-eigen \ + ros-$ROS_DISTRO-ackermann-msgs \ + ros-$ROS_DISTRO-derived-object-msgs \ + ros-$ROS_DISTRO-cv-bridge \ + ros-$ROS_DISTRO-vision-opencv \ + ros-$ROS_DISTRO-rqt-image-view \ + ros-$ROS_DISTRO-rqt-gui-py \ + qt5-default \ + ros-$ROS_DISTRO-pcl-conversions \ + ros-$ROS_DISTRO-resource-retriever \ + ros-$ROS_DISTRO-yaml-cpp-vendor \ + ros-$ROS_DISTRO-urdf \ + ros-$ROS_DISTRO-map-msgs \ + ros-$ROS_DISTRO-laser-geometry \ + ros-$ROS_DISTRO-interactive-markers \ + ros-$ROS_DISTRO-rviz2 + +# Install Rosdep requirements +COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list +RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) + +# Copy in source code from source stage +WORKDIR ${AMENT_WS} +COPY --from=source ${AMENT_WS}/src src + +# Dependency Cleanup +WORKDIR / +RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ + rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* + +################################ 
Build ################################ +FROM dependencies as build + +ARG CARLA_VERSION + +#Install Python Carla API +COPY --from=wato_carla_api --chown=root /home/carla/PythonAPI/carla /opt/carla/PythonAPI +WORKDIR /opt/carla/PythonAPI +RUN python3.8 -m easy_install pip && \ + pip3 install carla==${CARLA_VERSION} && \ + pip install simple-pid==2.0.0 && \ + pip install transforms3d==0.4.1 && \ + pip install pexpect==4.9.0 && \ + pip install networkx==3.1 + +WORKDIR ${AMENT_WS}/src + +# Build ROS2 packages +WORKDIR ${AMENT_WS} +RUN . /opt/ros/$ROS_DISTRO/setup.sh && \ + colcon build \ + --cmake-args -DCMAKE_BUILD_TYPE=Release + +# Entrypoint will run before any CMD on launch. Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash +COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh +ENTRYPOINT ["./wato_ros_entrypoint.sh"] + +################################ Prod ################################ +FROM build as deploy + +# Source Cleanup and Security Setup +RUN chown -R $USER:$USER ${AMENT_WS} +RUN rm -rf src/* + +USER ${USER} diff --git a/docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile b/docker/simulation/carla_sample_node/carla_sample_node.Dockerfile similarity index 78% rename from docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile rename to docker/simulation/carla_sample_node/carla_sample_node.Dockerfile index d91b79e5..b1e68d90 100644 --- a/docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile +++ b/docker/simulation/carla_sample_node/carla_sample_node.Dockerfile @@ -6,8 +6,17 @@ FROM ${BASE_IMAGE} as source WORKDIR ${AMENT_WS}/src # Copy in source code -COPY src/perception/traffic_light_detection traffic_light_detection -COPY src/wato_msgs/sample_msgs sample_msgs +COPY src/simulation/carla_sample_node carla_sample_node +COPY src/wato_msgs/simulation/embedded_msgs embedded_msgs +COPY src/wato_msgs/simulation/path_planning_msgs path_planning_msgs + +# Carla 
specific messages + +RUN git clone --depth 1 https://github.com/ros-drivers/ackermann_msgs.git --branch 2.0.2 + +RUN git clone --depth 1 https://github.com/ros-perception/image_common.git --branch 3.1.8 + +RUN git clone --depth 1 https://github.com/carla-simulator/ros-carla-msgs.git --branch 1.3.0 # Scan for rosdeps RUN apt-get -qq update && rosdep update && \ diff --git a/docker/simulation/carla_sim/carla_sim.Dockerfile b/docker/simulation/carla_sim/carla_sim.Dockerfile deleted file mode 100644 index c49f8358..00000000 --- a/docker/simulation/carla_sim/carla_sim.Dockerfile +++ /dev/null @@ -1,55 +0,0 @@ -ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:humble-ubuntu22.04 - -################################ Source ################################ -FROM ${BASE_IMAGE} as source - -WORKDIR ${AMENT_WS}/src - -# Copy in source code -COPY src/simulation/carla_sim carla_sim -COPY src/wato_msgs/sample_msgs sample_msgs - -# Scan for rosdeps -RUN apt-get -qq update && rosdep update && \ - rosdep install --from-paths . --ignore-src -r -s \ - | grep 'apt-get install' \ - | awk '{print $3}' \ - | sort > /tmp/colcon_install_list - -################################# Dependencies ################################ -FROM ${BASE_IMAGE} as dependencies - -# Install Rosdep requirements -COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list -RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) - -# Copy in source code from source stage -WORKDIR ${AMENT_WS} -COPY --from=source ${AMENT_WS}/src src - -# Dependency Cleanup -WORKDIR / -RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ - rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* - -################################ Build ################################ -FROM dependencies as build - -# Build ROS2 packages -WORKDIR ${AMENT_WS} -RUN . 
/opt/ros/$ROS_DISTRO/setup.sh && \ - colcon build \ - --cmake-args -DCMAKE_BUILD_TYPE=Release - -# Entrypoint will run before any CMD on launch. Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash -COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh -ENTRYPOINT ["./wato_ros_entrypoint.sh"] - -################################ Prod ################################ -FROM build as deploy - -# Source Cleanup and Security Setup -RUN chown -R $USER:$USER ${AMENT_WS} -RUN rm -rf src/* - -USER ${USER} diff --git a/docker/simulation/carla_viz/carla_viz.Dockerfile b/docker/simulation/carla_viz/carla_viz.Dockerfile new file mode 100644 index 00000000..f8a03c9e --- /dev/null +++ b/docker/simulation/carla_viz/carla_viz.Dockerfile @@ -0,0 +1,14 @@ +FROM mjxu96/carlaviz:0.9.13 + +ENV CARLAVIZ_BACKEND_HOST localhost +ENV CARLAVIZ_BACKEND_PORT 8081 +ENV CARLA_SERVER_HOST localhost +ENV CARLA_SERVER_PORT 2000 + +WORKDIR /home/carla/carlaviz + +COPY docker/simulation/carla_viz/carlaviz_entrypoint.sh /home/carla/carlaviz/docker/carlaviz_entrypoint.sh + +RUN chmod +x ./docker/carlaviz_entrypoint.sh + +ENTRYPOINT ["/bin/bash", "-c", "./docker/carlaviz_entrypoint.sh > /dev/null 2>&1"] \ No newline at end of file diff --git a/docker/simulation/carla_viz/carlaviz_entrypoint.sh b/docker/simulation/carla_viz/carlaviz_entrypoint.sh new file mode 100644 index 00000000..dbae6309 --- /dev/null +++ b/docker/simulation/carla_viz/carlaviz_entrypoint.sh @@ -0,0 +1,53 @@ + +is_backend_up="" + +function wait_for_backend_up() { + max_wait_time=5 + for ((i=0; i<=$max_wait_time; ++i)) do + cat output.txt | grep -q "Connected to Carla Server" + if [ $? 
== 0 ]; then + is_backend_up="1" + break + fi + sleep 1 + done +} + +function cleanup_backend() { + backend_pid=$(pidof backend) + kill -9 $backend_pid + echo "Killed Backend process $backend_pid" +} + +echo -e "CARLAVIZ_BACKEND_HOST=${CARLAVIZ_BACKEND_HOST}" >> /home/carla/.env +echo -e "CARLAVIZ_BACKEND_PORT=${CARLAVIZ_BACKEND_PORT}" >> /home/carla/.env + +echo "Make sure you have launched the Carla server." +echo "Launching backend." +./backend/bin/backend ${CARLA_SERVER_HOST} ${CARLA_SERVER_PORT} |& tee output.txt & +wait_for_backend_up +if [[ -z "$is_backend_up" ]]; then + echo "Backend is not launched. Please check if you have already started the Carla server." + cleanup_backend + exit 1 +fi + +echo "Backend launched." + +echo "Launching frontend" +# enable nginx +service nginx restart +echo "Frontend launched. Please open your browser" +sleep 10 +sed -i s/:8081/:$CARLAVIZ_BACKEND_PORT/g /var/www/carlaviz/bundle.js + +while [ "$is_backend_up" = "1" ] +do + cat output.txt | grep -q "time-out of" + if [ $? == 0 ]; then + is_backend_up="" + cleanup_backend + exit 1 + fi + sleep 5 +done \ No newline at end of file diff --git a/docs/carla.md b/docs/carla.md new file mode 100644 index 00000000..11a1f487 --- /dev/null +++ b/docs/carla.md @@ -0,0 +1,67 @@ +# CARLA Setup in Monorepo +CARLA is an open-source autonomous driving simulator based on Unreal Engine 4. The primary ways of interacting with the CARLA setup are through the Python API or with ROS2 (via the CARLA ROS Bridge). Both of these methods are explained in greater detail further down in this document. + +The goal of the CARLA setup is to provide an easy way for WATonomous members to interact with the simulator without having to setup anything themselves. 
So, if you have any questions or suggestions regarding any part of the CARLA setup (or questions about CARLA in general) please bring them up in Discord in #simulation-general or contact Vishal Jayakumar (masteroooogway on Discord or at [v3jayaku@watonomous.ca](mailto:v3jayaku@watonomous.ca)). + +- [CARLA WATonomous Documentation](#using-carla-setup-in-monorepo) + - [Initial Setup](#initial-setup) + - [Interacting with CARLA using the Python API](#interacting-with-carla-using-the-python-api) + - [Using a ROS Node to interact with CARLA](#using-a-ros-node-to-interact-with-carla) + - [CARLA Visualization using Foxglove Studio](#carla-visualization-using-foxglove-studio) + - [CARLA Visualization using CarlaViz](#carla-visualization-using-carlaviz) + - [FAQ](#faq) + - [CARLA is running very slow (approx. 3 fps)](#carla-is-running-very-slow-approx-3-fps) + +**Make sure you are comfortable with navigating the monorepo before reading** + +## Initial Setup + +To run CARLA and all associated containers first add `simulation` as an `ACTIVE_MODULE` in `watod-config.sh`. This will cause the following containers to launch when `watod up` is run: `carla_server`, `carla_viz`, `carla_ros_bridge`, `carla_notebooks`, `carla_sample_node`. + +## Interacting with CARLA using the Python API + +This is the simplest way of interacting with CARLA and most importantly does not require the usage or knowledge of ROS. The documentation below will only show the setup procedure for using the CARLA Python API in the WATOnomous server (via Jupyter Notebook). The full CARLA Python API documentation can be found here ([CARLA Python API Documentation](https://carla.readthedocs.io/en/0.9.13/python_api/)). 
+ +**On the WATOnomous Server** + +Forward the port specified by the variable `CARLA_NOTEBOOKS_PORT` (which can be found in the `.env` file in the `modules` folder once `watod up` is run) using either the `ports` section of VS Code or by running the command `ssh -L 8888:localhost:8888 @-ubuntu1.watocluster.local` on your local machine (replace 8888 with the port specified in the `CARLA_NOTEBOOKS_PORT` variable). If the port is auto-forwarded by VS Code then you do not need to add it manually. + +**On your local machine (your personal laptop/pc)** + +Open any web browser (Chrome, Firefox, Edge etc.) and type `localhost:8888` (replace 8888 with the port specified in the `CARLA_NOTEBOOKS_PORT` variable) and click enter. You should be automatically redirected to the Jupyter Notebook home screen where you should see the file `carla_notebooks.ipynb`. Open that file and follow the instructions in it. Either edit the `carla_notebooks.ipynb` file directly or create a new file and copy the first code block in `carla_notebooks.ipynb`. + +## Using a ROS Node to interact with CARLA + +**Ensure that you have a good understanding of ROS and writing ROS nodes before proceeding** + +Currently there is a CARLA sample node setup (written in Python) that shows how to publish a message to enable (and keep enabled) autopilot, subscribe to the GNSS sensor topic and output the data to the console. What is probably most helpful from the sample node is the Dockerfile located in `wato_monorepo_v2/docker/simulation/carla_sample_node/carla_sample_node.Dockerfile`, where you can find all the CARLA related messages that should be installed when building your own ROS nodes that interact with CARLA. + +## CARLA Visualization using Foxglove Studio + +Foxglove Studio is a tool to visualize and interact with ROS messages. Using Foxglove Studio, data such as Camera, LiDAR, Radar etc. can be visualized. 
Foxglove Studio also allows for messages to be published to topics, for example to enable autopilot or set vehicle speed. The documentation below will only show the setup procedure for using Foxglove Studio with the WATOnomous server. Further documentation regarding how to use Foxglove Studio and all its features can be found here ([Foxglove Studio Documentation](https://foxglove.dev/docs/studio)) + +**On the WATOnomous Server** + +Add `vis_tools` as an `ACTIVE_PROFILE`. This will launch the `foxglove.Dockerfile` container with an open port when `watod up` is run. + +It exposes the port specified by the `FOXGLOVE_BRIDGE_PORT` variable, which will be defined in the `.env` file in the `profiles` folder (the `.env` file is populated after `watod up` is run). This port may be auto-forwarded by VS Code automatically but if not the port will need to be forwarded manually to your local machine. This can either be done in the `ports` section of VS Code or by running the command `ssh -L 8765:localhost:8765 @-ubuntu1.watocluster.local` on your local machine (replace 8765 with the port specified in the `FOXGLOVE_BRIDGE_PORT` variable). + +**On your local machine (your personal laptop/pc)** + +Open Foxglove Studio on your local machine using either the desktop app ([Download Here](https://foxglove.dev/studio)) or the [Web Application](https://studio.foxglove.dev/) (only supported on Chrome). Click on Open Connection then replace the default port (8765) with the port you specified in the `FOXGLOVE_BRIDGE_PORT` variable. Then click the Open button and after a few seconds Foxglove Studio should connect. You should now be able to access any of the topics being published or subscribed by the CARLA ROS Bridge. 
+ +## CARLA Visualization using CarlaViz + +**On the WATOnomous Server** + +CarlaViz is a visualization tool that is useful when you are only using the CARLA Python API through a Jupyter Notebook and you don't want the overhead that comes with the ROS Bridge + Foxglove method of visualization. To use CarlaViz forward the ports specified by the variables `CARLAVIZ_PORT` and `CARLAVIZ_PORT_2` defined in the `.env` file in the `profiles` folder (make sure you forward **both** ports). If you want to stop the ROS Bridge from running (for the reasons I mentioned in the previous sentences) then make sure to comment out the `carla_ros_bridge` service in the `docker-compose.carla.yaml` file in the `profiles` folder. + +**On your local machine (your personal laptop/pc)** + +In any browser go to `localhost:8081` (replace 8081 with the port specified in the `CARLAVIZ_PORT` variable) and after waiting 10 seconds or so CarlaViz should appear. + +## FAQ + +### CARLA is running very slow (approx. 3 fps) + +This is expected. The ROS bridge causes CARLA to render all the sensor data which slows down the simulation considerably. While this may be annoying when viewing real-time camera output or trying to control the car manually, the simulation is still running accurately. If you wish to replay scenes with a configurable playback speed, check out the docs on [CARLA's recorder](https://carla.readthedocs.io/en/0.9.13/adv_recorder/) functionality. 
\ No newline at end of file diff --git a/docs/dev/how_to_dev.md b/docs/dev/how_to_dev.md index 409c5ae2..0719e966 100644 --- a/docs/dev/how_to_dev.md +++ b/docs/dev/how_to_dev.md @@ -92,7 +92,7 @@ services: - "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}-${CACHE_FROM_TAG}" - "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}-develop" # name of the image made by the dockerfile (boilerplate, but with name change) - image: "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}-${TAG}" + image: "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}-build_${TAG}" # deals with permission and ownership in the container (boilerplate) user: ${FIXUID:?}:${FIXGID:?} # IMPORTANT: mounts your ROS2 node into the container so that changes in the dockerfile are reflected in your diff --git a/modules/dev_overrides/docker-compose.perception.yaml b/modules/dev_overrides/docker-compose.perception.yaml index 31b41896..07e28ec2 100644 --- a/modules/dev_overrides/docker-compose.perception.yaml +++ b/modules/dev_overrides/docker-compose.perception.yaml @@ -32,15 +32,6 @@ services: volumes: - ${MONO_DIR}/src/perception/lidar_object_detection:/home/bolty/ament_ws/src/lidar_object_detection - traffic_light_detection: - <<: *fixuid - extends: - file: ../docker-compose.perception.yaml - service: traffic_light_detection - command: tail -F anything - volumes: - - ${MONO_DIR}/src/perception/traffic_light_detection:/home/bolty/ament_ws/src/traffic_light_detection - traffic_sign_detection: <<: *fixuid extends: diff --git a/modules/dev_overrides/docker-compose.simulation.yaml b/modules/dev_overrides/docker-compose.simulation.yaml index 04730356..2da94571 100644 --- a/modules/dev_overrides/docker-compose.simulation.yaml +++ b/modules/dev_overrides/docker-compose.simulation.yaml @@ -6,10 +6,35 @@ x-fixuid: &fixuid services: carla_sim: - <<: *fixuid extends: file: ../docker-compose.simulation.yaml service: carla_sim + + carla_ros_bridge: + <<: *fixuid + extends: + file: ../docker-compose.simulation.yaml + service: carla_ros_bridge + command: tail -F anything + 
volumes: + - ${MONO_DIR}/src/simulation/carla_config:/home/bolty/ament_ws/src/carla_config + # command: /bin/bash -c "ros2 launch carla_config carla.launch.py" + + carla_viz: + extends: + file: ../docker-compose.simulation.yaml + service: carla_viz + + carla_sample_node: + <<: *fixuid + extends: + file: ../docker-compose.simulation.yaml + service: carla_sample_node command: tail -F anything volumes: - - ${MONO_DIR}/src/simulation/carla_sim:/home/bolty/ament_ws/src/carla_sim + - ${MONO_DIR}/src/simulation/carla_sample_node:/home/bolty/ament_ws/src/carla_sample_node + + carla_notebooks: + extends: + file: ../docker-compose.simulation.yaml + service: carla_notebooks diff --git a/modules/docker-compose.action.yaml b/modules/docker-compose.action.yaml index ab4743dd..ab73667e 100644 --- a/modules/docker-compose.action.yaml +++ b/modules/docker-compose.action.yaml @@ -6,8 +6,8 @@ services: context: .. dockerfile: docker/action/global_planning/global_planning.Dockerfile cache_from: - - "${ACTION_GLOBAL_PLANNING_IMAGE}:${TAG}" - - "${ACTION_GLOBAL_PLANNING_IMAGE}:main" + - "${ACTION_GLOBAL_PLANNING_IMAGE}:build_${TAG}" + - "${ACTION_GLOBAL_PLANNING_IMAGE}:build_main" target: deploy image: "${ACTION_GLOBAL_PLANNING_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch global_planning global_planning.launch.py" @@ -17,8 +17,8 @@ services: context: .. dockerfile: docker/action/behaviour_planning/behaviour_planning.Dockerfile cache_from: - - "${ACTION_BEHAVIOUR_PLANNING_IMAGE}:${TAG}" - - "${ACTION_BEHAVIOUR_PLANNING_IMAGE}:main" + - "${ACTION_BEHAVIOUR_PLANNING_IMAGE}:build_${TAG}" + - "${ACTION_BEHAVIOUR_PLANNING_IMAGE}:build_main" target: deploy image: "${ACTION_BEHAVIOUR_PLANNING_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch behaviour_planning behaviour_planning.launch.py" @@ -28,8 +28,8 @@ services: context: .. 
dockerfile: docker/action/local_planning/local_planning.Dockerfile cache_from: - - "${ACTION_LOCAL_PLANNING_IMAGE}:${TAG}" - - "${ACTION_LOCAL_PLANNING_IMAGE}:main" + - "${ACTION_LOCAL_PLANNING_IMAGE}:build_${TAG}" + - "${ACTION_LOCAL_PLANNING_IMAGE}:build_main" target: deploy image: "${ACTION_LOCAL_PLANNING_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch local_planning local_planning.launch.py" @@ -39,8 +39,8 @@ services: context: .. dockerfile: docker/action/model_predictive_control/model_predictive_control.Dockerfile cache_from: - - "${ACTION_MPC_IMAGE}:${TAG}" - - "${ACTION_MPC_IMAGE}:main" + - "${ACTION_MPC_IMAGE}:build_${TAG}" + - "${ACTION_MPC_IMAGE}:build_main" target: deploy image: "${ACTION_MPC_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch model_predictive_control model_predictive_control.launch.py" diff --git a/modules/docker-compose.infrastructure.yaml b/modules/docker-compose.infrastructure.yaml index e8db67b9..63adceef 100644 --- a/modules/docker-compose.infrastructure.yaml +++ b/modules/docker-compose.infrastructure.yaml @@ -6,8 +6,8 @@ services: context: .. dockerfile: docker/infrastructure/foxglove/foxglove.Dockerfile cache_from: - - "${INFRASTRUCTURE_FOXGLOVE_IMAGE:?}:${TAG}" - - "${INFRASTRUCTURE_FOXGLOVE_IMAGE:?}:main" + - "${INFRASTRUCTURE_FOXGLOVE_IMAGE:?}:build_${TAG}" + - "${INFRASTRUCTURE_FOXGLOVE_IMAGE:?}:build_main" target: deploy image: "${INFRASTRUCTURE_FOXGLOVE_IMAGE:?}:${TAG}" # command: ["ros2", "launch", "foxglove_bridge", "foxglove_bridge_launch.xml", "port:=${FOXGLOVE_BRIDGE_PORT:?}"] @@ -20,8 +20,8 @@ services: # context: .. 
# dockerfile: docker/infrastructure/vnc/vnc.Dockerfile # cache_from: - # - "${INFRASTRUCTURE_VIS_TOOLS_VNC_IMAGE:?}:${TAG}" - # - "${INFRASTRUCTURE_VIS_TOOLS_VNC_IMAGE:?}:main" + # - "${INFRASTRUCTURE_VIS_TOOLS_VNC_IMAGE:?}:build_${TAG}" + # - "${INFRASTRUCTURE_VIS_TOOLS_VNC_IMAGE:?}:build_main" # image: "${INFRASTRUCTURE_VIS_TOOLS_VNC_IMAGE:?}:${TAG}" # ports: # - "${GUI_TOOLS_VNC_PORT:?}:5900" @@ -33,8 +33,8 @@ services: context: .. dockerfile: docker/infrastructure/data_stream/data_stream.Dockerfile cache_from: - - "${INFRASTRUCTURE_DATA_STREAM_IMAGE:?}:${TAG}" - - "${INFRASTRUCTURE_DATA_STREAM_IMAGE:?}:main" + - "${INFRASTRUCTURE_DATA_STREAM_IMAGE:?}:build_${TAG}" + - "${INFRASTRUCTURE_DATA_STREAM_IMAGE:?}:build_main" target: deploy image: "${INFRASTRUCTURE_DATA_STREAM_IMAGE:?}:${TAG}" volumes: diff --git a/modules/docker-compose.interfacing.yaml b/modules/docker-compose.interfacing.yaml index 0024ef07..51c39102 100644 --- a/modules/docker-compose.interfacing.yaml +++ b/modules/docker-compose.interfacing.yaml @@ -6,8 +6,8 @@ services: context: .. dockerfile: docker/interfacing/sensor_interfacing/sensor_interfacing.Dockerfile cache_from: - - "${INTERFACING_SENSOR_IMAGE}:${TAG}" - - "${INTERFACING_SENSOR_IMAGE}:main" + - "${INTERFACING_SENSOR_IMAGE}:build_${TAG}" + - "${INTERFACING_SENSOR_IMAGE}:build_main" target: deploy image: "${INTERFACING_SENSOR_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch sensor_interfacing sensor_interfacing.launch.py" @@ -17,8 +17,8 @@ services: context: .. 
dockerfile: docker/interfacing/can_interfacing/can_interfacing.Dockerfile cache_from: - - "${INTERFACING_CAN_IMAGE}:${TAG}" - - "${INTERFACING_CAN_IMAGE}:main" + - "${INTERFACING_CAN_IMAGE}:build_${TAG}" + - "${INTERFACING_CAN_IMAGE}:build_main" target: deploy image: "${INTERFACING_CAN_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch can_interfacing can_interfacing.launch.py" diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index f3434499..46069e2a 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -31,6 +31,7 @@ services: command: /bin/bash -c "ros2 launch camera_object_detection eve_launch.py" volumes: - /mnt/wato-drive2/perception_models/yolov8m.pt:/perception_models/yolov8m.pt + - /mnt/wato-drive2/perception_models/traffic_light.pt:/perception_models/traffic_light.pt lidar_object_detection: build: @@ -42,16 +43,6 @@ services: target: deploy image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch lidar_object_detection lidar_object_detection.launch.py" - traffic_light_detection: - build: - context: .. - dockerfile: docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile - cache_from: - - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:build_${TAG}" - - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:build_main" - target: deploy - image: "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch traffic_light_detection traffic_light_detection.launch.py" traffic_sign_detection: build: @@ -69,8 +60,8 @@ services: context: .. 
dockerfile: docker/perception/semantic_segmentation/semantic_segmentation.Dockerfile cache_from: - - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:${TAG}" - - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:main" + - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:build_${TAG}" + - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:build_main" target: deploy image: "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch semantic_segmentation semantic_segmentation.launch.py" diff --git a/modules/docker-compose.samples.yaml b/modules/docker-compose.samples.yaml index f094d080..38cd838a 100644 --- a/modules/docker-compose.samples.yaml +++ b/modules/docker-compose.samples.yaml @@ -6,8 +6,8 @@ services: context: .. dockerfile: docker/samples/cpp_aggregator/cpp_aggregator.Dockerfile cache_from: - - "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}:${TAG}" - - "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}:main" + - "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}:build_${TAG}" + - "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}:build_main" target: deploy image: "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}:${TAG}" command: /bin/bash -c "ros2 launch aggregator aggregator.launch.py" @@ -17,8 +17,9 @@ services: # context: .. # dockerfile: docker/samples/py_aggregator/py_aggregator.Dockerfile # cache_from: - # - "${SAMPLES_PYTHON_AGGREGATOR_IMAGE:?}:${TAG}" - # - "${SAMPLES_PYTHON_AGGREGATOR_IMAGE:?}:main" + # - "${SAMPLES_PYTHON_AGGREGATOR_IMAGE:?}:build_${TAG}" + # - "${SAMPLES_PYTHON_AGGREGATOR_IMAGE:?}:build_main" + # target: deploy # image: "${SAMPLES_PYTHON_AGGREGATOR_IMAGE:?}:${TAG}" # command: /bin/bash -c "ros2 launch aggregator aggregator.launch.py" @@ -27,8 +28,9 @@ services: # context: .. 
# dockerfile: docker/samples/cpp_producer/cpp_producer.Dockerfile # cache_from: - # - "${SAMPLES_CPP_PRODUCER_IMAGE:?}:${TAG}" - # - "${SAMPLES_CPP_PRODUCER_IMAGE:?}:main" + # - "${SAMPLES_CPP_PRODUCER_IMAGE:?}:build_${TAG}" + # - "${SAMPLES_CPP_PRODUCER_IMAGE:?}:build_main" + # target: deploy # image: "${SAMPLES_CPP_PRODUCER_IMAGE:?}:${TAG}" # command: /bin/bash -c "ros2 launch producer producer.launch.py" @@ -37,8 +39,8 @@ services: context: .. dockerfile: docker/samples/py_producer/py_producer.Dockerfile cache_from: - - "${SAMPLES_PYTHON_PRODUCER_IMAGE:?}:${TAG}" - - "${SAMPLES_PYTHON_PRODUCER_IMAGE:?}:main" + - "${SAMPLES_PYTHON_PRODUCER_IMAGE:?}:build_${TAG}" + - "${SAMPLES_PYTHON_PRODUCER_IMAGE:?}:build_main" target: deploy image: "${SAMPLES_PYTHON_PRODUCER_IMAGE:?}:${TAG}" command: /bin/bash -c "ros2 launch producer producer.launch.py" @@ -48,8 +50,9 @@ services: # context: .. # dockerfile: docker/samples/cpp_transformer/cpp_transformer.Dockerfile # cache_from: - # - "${SAMPLES_CPP_TRANSFORMER_IMAGE:?}:${TAG}" - # - "${SAMPLES_CPP_TRANSFORMER_IMAGE:?}:main" + # - "${SAMPLES_CPP_TRANSFORMER_IMAGE:?}:build_${TAG}" + # - "${SAMPLES_CPP_TRANSFORMER_IMAGE:?}:build_main" + # target: deploy # image: "${SAMPLES_CPP_TRANSFORMER_IMAGE:?}:${TAG}" # command: /bin/bash -c "ros2 launch transformer transformer.launch.py" @@ -58,8 +61,8 @@ services: context: .. 
dockerfile: docker/samples/py_transformer/py_transformer.Dockerfile cache_from: - - "${SAMPLES_PYTHON_TRANSFORMER_IMAGE:?}:${TAG}" - - "${SAMPLES_PYTHON_TRANSFORMER_IMAGE:?}:main" + - "${SAMPLES_PYTHON_TRANSFORMER_IMAGE:?}:build_${TAG}" + - "${SAMPLES_PYTHON_TRANSFORMER_IMAGE:?}:build_main" target: deploy image: "${SAMPLES_PYTHON_TRANSFORMER_IMAGE:?}:${TAG}" command: /bin/bash -c "ros2 launch transformer transformer.launch.py" diff --git a/modules/docker-compose.simulation.yaml b/modules/docker-compose.simulation.yaml index ceba4f8b..241f4567 100644 --- a/modules/docker-compose.simulation.yaml +++ b/modules/docker-compose.simulation.yaml @@ -2,12 +2,76 @@ version: "3.8" services: carla_sim: + image: carlasim/carla:0.9.13 + environment: + - DISPLAY=1 + - CUDA_VISIBLE_DEVICES=0,1,2 + - NVIDIA_VISIBLE_DEVICES=0,1,2 + runtime: nvidia + restart: always + command: /bin/bash -c "./CarlaUE4.sh -nosound -carla-server -RenderOffscreen -world-port=2000 -quality-level=Low" + + carla_ros_bridge: build: context: .. - dockerfile: docker/perception/carla_sim/carla_sim.Dockerfile + dockerfile: docker/simulation/carla_ros_bridge/carla_ros_bridge.Dockerfile cache_from: - - "${SIMULATION_CARLA_IMAGE:?}:${TAG}" - - "${SIMULATION_CARLA_IMAGE:?}:main" + - "${SIMULATION_CARLA_ROS_BRIDGE_IMAGE:?}:build_${TAG}" + - "${SIMULATION_CARLA_ROS_BRIDGE_IMAGE:?}:build_main" target: deploy - image: "${SIMULATION_CARLA_IMAGE:?}:${TAG}" - command: /bin/bash -c "ros2 launch carla_sim carla_sim.launch.py" + image: "${SIMULATION_CARLA_ROS_BRIDGE_IMAGE}:${TAG}" + environment: + - CARLA_HOSTNAME=${COMPOSE_PROJECT_NAME:?}-carla_sim-1 + - USE_ACKERMANN_CONTROL=False + # command: /bin/bash -c "echo CARLA_ROS_BRIDGE disabled" + command: /bin/bash -c "ros2 launch carla_config carla.launch.py" + + carla_viz: + build: + context: .. 
+ dockerfile: docker/simulation/carla_viz/carla_viz.Dockerfile + cache_from: + - "${SIMULATION_CARLAVIZ_IMAGE:?}:build_${TAG}" + - "${SIMULATION_CARLAVIZ_IMAGE:?}:build_main" + image: "${SIMULATION_CARLAVIZ_IMAGE:?}:${TAG}" + ports: + - ${CARLAVIZ_PORT}:8080 + - ${CARLAVIZ_PORT_2}:8081 + environment: + - CARLA_SERVER_HOST=${COMPOSE_PROJECT_NAME:?}-carla_sim-1 + - CARLAVIZ_BACKEND_PORT=${CARLAVIZ_PORT_2} + entrypoint: ["/bin/bash", "-c", "./docker/carlaviz_entrypoint.sh > /dev/null 2>&1"] + restart: always + + carla_sample_node: + build: + context: .. + dockerfile: docker/simulation/carla_sample_node/carla_sample_node.Dockerfile + cache_from: + - "${SIMULATION_CARLA_SAMPLE_NODE_IMAGE:?}:build_${TAG}" + - "${SIMULATION_CARLA_SAMPLE_NODE_IMAGE:?}:build_main" + target: deploy + image: "${SIMULATION_CARLA_SAMPLE_NODE_IMAGE:?}:${TAG}" + # command: /bin/bash -c "echo CARLA_SAMPLE_NODE disabled" + command: /bin/bash -c "ros2 launch carla_sample_node carla_sample_node.launch.py publish_autopilot:='False'" + + carla_notebooks: + build: + context: .. 
+ dockerfile: docker/simulation/carla_notebooks/carla_notebooks.Dockerfile + cache_from: + - "${SIMULATION_CARLA_NOTEBOOKS_IMAGE:?}:build_${TAG}" + - "${SIMULATION_CARLA_NOTEBOOKS_IMAGE:?}:build_main" + image: "${SIMULATION_CARLA_NOTEBOOKS_IMAGE:?}:${TAG}" + ports: + - ${CARLA_NOTEBOOKS_PORT:?}:${CARLA_NOTEBOOKS_PORT:?} + environment: + - CLIENT_NAME=${COMPOSE_PROJECT_NAME:?}-carla_sim-1 + - SCENARIO_RUNNER_ROOT=/home/bolty/scenario_runner + - DISPLAY=1 + - CUDA_VISIBLE_DEVICES=0 + - NVIDIA_VISIBLE_DEVICES=1 + container_name: ${COMPOSE_PROJECT_NAME:?}_carla_notebooks + volumes: + - ${MONO_DIR}/src/simulation/carla_notebooks:/home/bolty/carla_notebooks + command: jupyter notebook --allow-root --ip=0.0.0.0 --port=${CARLA_NOTEBOOKS_PORT:?} --no-browser --ServerApp.token='' --ServerApp.password='' diff --git a/modules/docker-compose.world_modeling.yaml b/modules/docker-compose.world_modeling.yaml index 8bd6889d..c70e4cf2 100644 --- a/modules/docker-compose.world_modeling.yaml +++ b/modules/docker-compose.world_modeling.yaml @@ -6,8 +6,8 @@ services: context: .. dockerfile: docker/world_modeling/hd_map/hd_map.Dockerfile cache_from: - - "${WORLD_MODELING_HD_MAP_IMAGE}:${TAG}" - - "${WORLD_MODELING_HD_MAP_IMAGE}:main" + - "${WORLD_MODELING_HD_MAP_IMAGE}:build_${TAG}" + - "${WORLD_MODELING_HD_MAP_IMAGE}:build_main" target: deploy image: "${WORLD_MODELING_HD_MAP_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch hd_map hd_map.launch.py" @@ -17,8 +17,8 @@ services: context: .. dockerfile: docker/world_modeling/localization/localization.Dockerfile cache_from: - - "${WORLD_MODELING_LOCALIZATION_IMAGE}:${TAG}" - - "${WORLD_MODELING_LOCALIZATION_IMAGE}:main" + - "${WORLD_MODELING_LOCALIZATION_IMAGE}:build_${TAG}" + - "${WORLD_MODELING_LOCALIZATION_IMAGE}:build_main" target: deploy image: "${WORLD_MODELING_LOCALIZATION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch localization localization.launch.py" @@ -28,8 +28,8 @@ services: context: .. 
dockerfile: docker/world_modeling/occupancy/occupancy.Dockerfile cache_from: - - "${WORLD_MODELING_OCCUPANCY_IMAGE}:${TAG}" - - "${WORLD_MODELING_OCCUPANCY_IMAGE}:main" + - "${WORLD_MODELING_OCCUPANCY_IMAGE}:build_${TAG}" + - "${WORLD_MODELING_OCCUPANCY_IMAGE}:build_main" target: deploy image: "${WORLD_MODELING_OCCUPANCY_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch occupancy occupancy.launch.py" @@ -39,8 +39,8 @@ services: context: .. dockerfile: docker/world_modeling/occupancy_segmentation/occupancy_segmentation.Dockerfile cache_from: - - "${WORLD_MODELING_OCCUPANCY_SEGMENTATION_IMAGE}:${TAG}" - - "${WORLD_MODELING_OCCUPANCY_SEGMENTATION_IMAGE}:main" + - "${WORLD_MODELING_OCCUPANCY_SEGMENTATION_IMAGE}:build_${TAG}" + - "${WORLD_MODELING_OCCUPANCY_SEGMENTATION_IMAGE}:build_main" target: deploy image: "${WORLD_MODELING_OCCUPANCY_SEGMENTATION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch occupancy_segmentation occupancy_segmentation.launch.py" @@ -50,8 +50,8 @@ services: context: .. 
dockerfile: docker/world_modeling/motion_forecasting/motion_forecasting.Dockerfile cache_from: - - "${WORLD_MODELING_MOTION_FORECASTING_IMAGE}:${TAG}" - - "${WORLD_MODELING_MOTION_FORECASTING_IMAGE}:main" + - "${WORLD_MODELING_MOTION_FORECASTING_IMAGE}:build_${TAG}" + - "${WORLD_MODELING_MOTION_FORECASTING_IMAGE}:build_main" target: deploy image: "${WORLD_MODELING_MOTION_FORECASTING_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch motion_forecasting motion_forecasting.launch.py" diff --git a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py index b5ad2ea8..05ceb431 100755 --- a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py +++ b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py @@ -1,5 +1,6 @@ import rclpy from rclpy.node import Node +import os from sensor_msgs.msg import Image, CompressedImage from vision_msgs.msg import ( @@ -9,7 +10,7 @@ ) from ultralytics.nn.autobackend import AutoBackend -from ultralytics.data.augment import LetterBox +from ultralytics.data.augment import LetterBox, CenterCrop from ultralytics.utils.ops import non_max_suppression from ultralytics.utils.plotting import Annotator, colors @@ -34,21 +35,27 @@ def __init__(self): self.declare_parameter("camera_topic", "/camera/right/image_color") self.declare_parameter("publish_vis_topic", "/annotated_img") self.declare_parameter("publish_detection_topic", "/detections") - self.declare_parameter("model_path", "/perception_models/yolov8s.pt") - self.declare_parameter("image_size", 480) + self.declare_parameter("model_path", "/perception_models/yolov8m.pt") + self.declare_parameter("image_size", 1024) self.declare_parameter("compressed", False) + self.declare_parameter("crop_mode", "LetterBox") + self.declare_parameter("save_detections", False) self.camera_topic = self.get_parameter("camera_topic").value 
self.publish_vis_topic = self.get_parameter("publish_vis_topic").value - self.publish_detection_topic = self.get_parameter( - "publish_detection_topic").value + self.publish_detection_topic = self.get_parameter("publish_detection_topic").value self.model_path = self.get_parameter("model_path").value self.image_size = self.get_parameter("image_size").value self.compressed = self.get_parameter("compressed").value + self.crop_mode = self.get_parameter("crop_mode").value + self.save_detections = bool(self.get_parameter("save_detections").value) + self.counter = 0 # For saving detections + if self.save_detections: + if not os.path.exists("detections"): + os.makedirs("detections") self.line_thickness = 1 self.half = False - self.augment = False self.subscription = self.create_subscription( Image if not self.compressed else CompressedImage, @@ -61,9 +68,11 @@ def __init__(self): ), ) + self.orig_image_width = None + self.orig_image_height = None + # set device - self.device = torch.device( - "cuda" if torch.cuda.is_available() else "cpu") + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if torch.cuda.is_available(): self.get_logger().info("Using GPU for inference") else: @@ -73,24 +82,69 @@ def __init__(self): self.cv_bridge = CvBridge() # load yolov8 model - self.model = AutoBackend( - self.model_path, device=self.device, dnn=False, fp16=False) + self.model = AutoBackend(self.model_path, device=self.device, dnn=False, fp16=False) - self.names = self.model.module.names if hasattr( - self.model, "module") else self.model.names + self.names = self.model.module.names if hasattr(self.model, "module") else self.model.names self.stride = int(self.model.stride) # setup vis publishers - self.vis_publisher = self.create_publisher( - Image, self.publish_vis_topic, 10) + self.vis_publisher = self.create_publisher(Image, self.publish_vis_topic, 10) self.detection_publisher = self.create_publisher( - Detection2DArray, self.publish_detection_topic, 10) + 
Detection2DArray, self.publish_detection_topic, 10 + ) self.get_logger().info( - f"Successfully created node listening on camera topic: {self.camera_topic}...") + f"Successfully created node listening on camera topic: {self.camera_topic}..." + ) + + def crop_image(self, cv_image): + if self.crop_mode == "LetterBox": + img = LetterBox(self.image_size, stride=self.stride)(image=cv_image) + elif self.crop_mode == "CenterCrop": + img = CenterCrop(self.image_size)(cv_image) + else: + raise Exception("Invalid crop mode, please choose either 'LetterBox' or 'CenterCrop'!") + + return img + + def convert_bboxes_to_orig_frame(self, bbox): + """ + Converts bounding box coordinates from the scaled image frame back to the original image frame. + + This function takes into account the original image dimensions and the scaling method used + (either "LetterBox" or "CenterCrop") to accurately map the bounding box coordinates back to + their original positions in the original image. + + Parameters: + bbox (list): A list containing the bounding box coordinates in the format [x1, y1, w1, h1] + in the scaled image frame. + + Returns: + list: A list containing the bounding box coordinates in the format [x1, y1, w1, h1] + in the original image frame. 
- def preprocess_image(self, cv_image): + """ + width_scale = self.orig_image_width / self.image_size + height_scale = self.orig_image_height / self.image_size + if self.crop_mode == "LetterBox": + translation = (self.image_size - self.orig_image_height / width_scale) / 2 + return [ + bbox[0] * width_scale, + (bbox[1] - translation) * width_scale, + bbox[2] * width_scale, + bbox[3] * width_scale, + ] + elif self.crop_mode == "CenterCrop": + translation = (self.orig_image_width / height_scale - self.image_size) / 2 + return [ + (bbox[0] + translation) * height_scale, + bbox[1] * height_scale, + bbox[2] * height_scale, + bbox[3] * height_scale, + ] + + def crop_and_convert_to_tensor(self, cv_image): """ Preprocess the image by resizing, padding and rearranging the dimensions. @@ -100,9 +154,7 @@ def preprocess_image(self, cv_image): Returns: torch.Tensor image for model input of shape (1,3,w,h) """ - # Padded resize - img = cv_image - img = LetterBox(self.image_size, stride=self.stride)(image=cv_image) + img = self.crop_image(cv_image) # Convert img = img.transpose(2, 0, 1) @@ -143,10 +195,11 @@ def postprocess_detections(self, detections, annotator): annotator_img = annotator.result() return (processed_detections, annotator_img) - def publish_vis(self, annotated_img, feed): + def publish_vis(self, annotated_img, msg, feed): # Publish visualizations imgmsg = self.cv_bridge.cv2_to_imgmsg(annotated_img, "bgr8") - imgmsg.header.frame_id = "camera_{}_link".format(feed) + imgmsg.header.stamp = msg.header.stamp + imgmsg.header.frame_id = msg.header.frame_id self.vis_publisher.publish(imgmsg) def publish_detections(self, detections, msg, feed): @@ -175,10 +228,13 @@ def publish_detections(self, detections, msg, feed): detection2darray.detections.append(detection2d) self.detection_publisher.publish(detection2darray) - return def image_callback(self, msg): self.get_logger().debug("Received image") + if self.orig_image_width is None: + self.orig_image_width = msg.width + 
self.orig_image_height = msg.height + images = [msg] # msg is a single sensor image startTime = time.time() for image in images: @@ -189,16 +245,13 @@ def image_callback(self, msg): cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) else: try: - cv_image = self.cv_bridge.imgmsg_to_cv2( - image, desired_encoding="passthrough") + cv_image = self.cv_bridge.imgmsg_to_cv2(image, desired_encoding="passthrough") except CvBridgeError as e: self.get_logger().error(str(e)) return # preprocess image and run through prediction - img = self.preprocess_image(cv_image) - processed_cv_image = LetterBox( - self.image_size, stride=self.stride)(image=cv_image) + img = self.crop_and_convert_to_tensor(cv_image) pred = self.model(img) # nms function used same as yolov8 detect.py @@ -217,6 +270,7 @@ def image_callback(self, msg): xyxy[3] - xyxy[1], ] bbox = [b.item() for b in bbox] + bbox = self.convert_bboxes_to_orig_frame(bbox) detections.append( { @@ -228,20 +282,24 @@ def image_callback(self, msg): self.get_logger().debug(f"{label}: {bbox}") annotator = Annotator( - processed_cv_image, + cv_image, line_width=self.line_thickness, example=str(self.names), ) - (detections, annotated_img) = self.postprocess_detections( - detections, annotator) + (detections, annotated_img) = self.postprocess_detections(detections, annotator) # Currently we support a single camera so we pass an empty string feed = "" - self.publish_vis(annotated_img, feed) + self.publish_vis(annotated_img, msg, feed) self.publish_detections(detections, msg, feed) + if self.save_detections: + cv2.imwrite(f"detections/{self.counter}.jpg", annotated_img) + self.counter += 1 + self.get_logger().info( - f"Finished in: {time.time() - startTime}, {1/(time.time() - startTime)} Hz") + f"Finished in: {time.time() - startTime}, {1/(time.time() - startTime)} Hz" + ) def main(args=None): diff --git a/src/perception/camera_object_detection/config/deepracer_config.yaml b/src/perception/camera_object_detection/config/deepracer_config.yaml 
index 542efa7a..88a2e959 100755 --- a/src/perception/camera_object_detection/config/deepracer_config.yaml +++ b/src/perception/camera_object_detection/config/deepracer_config.yaml @@ -3,5 +3,5 @@ camera_object_detection_node: camera_topic: /camera_pkg/display_mjpeg publish_vis_topic: /annotated_img publish_obstacle_topic: /obstacles - model_path: /perception_models/yolov8s.pt - image_size: 480 + model_path: /perception_models/yolov8m.pt + image_size: 1024 diff --git a/src/perception/camera_object_detection/config/eve_config.yaml b/src/perception/camera_object_detection/config/eve_config.yaml new file mode 100755 index 00000000..208ccbe6 --- /dev/null +++ b/src/perception/camera_object_detection/config/eve_config.yaml @@ -0,0 +1,23 @@ +left_camera_object_detection_node: + ros__parameters: + camera_topic: /camera/left/image_color + publish_vis_topic: /camera/left/camera_detections_viz + publish_detection_topic: /camera/left/camera_detections + model_path: /perception_models/yolov8m.pt + image_size: 1024 + +center_camera_object_detection_node: + ros__parameters: + camera_topic: /camera/center/image_color + publish_vis_topic: /camera/center/camera_detections_viz + publish_detection_topic: /camera/center/camera_detections + model_path: /perception_models/yolov8m.pt + image_size: 1024 + +right_camera_object_detection_node: + ros__parameters: + camera_topic: /camera/right/image_color + publish_vis_topic: /camera/right/camera_detections_viz + publish_detection_topic: /camera/right/camera_detections + model_path: /perception_models/yolov8m.pt + image_size: 1024 diff --git a/src/perception/camera_object_detection/config/nuscenes_config.yaml b/src/perception/camera_object_detection/config/nuscenes_config.yaml index 6dff0554..6251c4b0 100755 --- a/src/perception/camera_object_detection/config/nuscenes_config.yaml +++ b/src/perception/camera_object_detection/config/nuscenes_config.yaml @@ -3,6 +3,6 @@ camera_object_detection_node: camera_topic: /CAM_FRONT/image_rect_compressed 
publish_vis_topic: /annotated_img publish_obstacle_topic: /obstacles - model_path: /perception_models/yolov8s.pt + model_path: /perception_models/yolov8m.pt image_size: 640 compressed: true diff --git a/src/perception/camera_object_detection/config/sim_config.yaml b/src/perception/camera_object_detection/config/sim_config.yaml deleted file mode 100755 index ec13a71b..00000000 --- a/src/perception/camera_object_detection/config/sim_config.yaml +++ /dev/null @@ -1,7 +0,0 @@ -camera_object_detection_node: - ros__parameters: - camera_topic: /camera/right/image_color - publish_vis_topic: /annotated_img - publish_obstacle_topic: /obstacles - model_path: /perception_models/yolov8s.pt - image_size: 480 diff --git a/src/perception/camera_object_detection/config/traffic_light_config.yaml b/src/perception/camera_object_detection/config/traffic_light_config.yaml new file mode 100755 index 00000000..9a3e00c8 --- /dev/null +++ b/src/perception/camera_object_detection/config/traffic_light_config.yaml @@ -0,0 +1,9 @@ +traffic_light_node: + ros__parameters: + camera_topic: /camera/left/image_color + publish_vis_topic: /traffic_lights_viz + publish_detection_topic: /traffic_lights + model_path: /perception_models/traffic_light.pt + crop_mode: CenterCrop + image_size: 1024 + save_detections: false diff --git a/src/perception/camera_object_detection/launch/deepracer_launch.py b/src/perception/camera_object_detection/launch/deepracer.launch.py similarity index 82% rename from src/perception/camera_object_detection/launch/deepracer_launch.py rename to src/perception/camera_object_detection/launch/deepracer.launch.py index c853a556..138c32b4 100755 --- a/src/perception/camera_object_detection/launch/deepracer_launch.py +++ b/src/perception/camera_object_detection/launch/deepracer.launch.py @@ -5,14 +5,12 @@ def generate_launch_description(): - ld = LaunchDescription() config = os.path.join( get_package_share_directory('camera_object_detection'), 'config', 'deeepracer.yaml' ) - # nodes 
camera_object_detection_node = Node( package='camera_object_detection', executable='camera_object_detection_node', @@ -20,7 +18,4 @@ def generate_launch_description(): parameters=[config] ) - # finalize - ld.add_action(camera_object_detection_node) - - return ld + return LaunchDescription([camera_object_detection_node]) diff --git a/src/perception/camera_object_detection/launch/eve.launch.py b/src/perception/camera_object_detection/launch/eve.launch.py new file mode 100755 index 00000000..53c3eb08 --- /dev/null +++ b/src/perception/camera_object_detection/launch/eve.launch.py @@ -0,0 +1,43 @@ +from launch import LaunchDescription +from launch.substitutions import LaunchConfiguration +from launch.conditions import LaunchConfigurationEquals +from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription +from launch.launch_description_sources import PythonLaunchDescriptionSource +from ament_index_python.packages import get_package_share_directory +import os + + +def generate_launch_description(): + launch_traffic_light = LaunchConfiguration("launch_traffic_light", default=True) + launch_traffic_light_arg = DeclareLaunchArgument( + "launch_traffic_light", + default_value=launch_traffic_light, + description="Launch traffic light detection", + ) + + launch_args = [launch_traffic_light_arg] + + camera_object_detection_launch_include_dir = os.path.join( + get_package_share_directory("camera_object_detection"), "launch", "include" + ) + + pretrained_yolov8_launch = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + [camera_object_detection_launch_include_dir, "/pretrained_yolov8.launch.py"] + ), + ) + + traffic_light_launch = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + [camera_object_detection_launch_include_dir, "/traffic_light.launch.py"] + ), + condition=LaunchConfigurationEquals("launch_traffic_light", "True"), + ) + + return LaunchDescription( + launch_args + + [ + pretrained_yolov8_launch, + traffic_light_launch, + ] + ) diff 
--git a/src/perception/camera_object_detection/launch/include/pretrained_yolov8.launch.py b/src/perception/camera_object_detection/launch/include/pretrained_yolov8.launch.py new file mode 100644 index 00000000..13096462 --- /dev/null +++ b/src/perception/camera_object_detection/launch/include/pretrained_yolov8.launch.py @@ -0,0 +1,39 @@ +from launch import LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + + +def generate_launch_description(): + config = os.path.join( + get_package_share_directory("camera_object_detection"), "config", "eve_config.yaml" + ) + + left_camera_object_detection_node = Node( + package="camera_object_detection", + executable="camera_object_detection_node", + name="left_camera_object_detection_node", + parameters=[config], + ) + + center_camera_object_detection_node = Node( + package="camera_object_detection", + executable="camera_object_detection_node", + name="center_camera_object_detection_node", + parameters=[config], + ) + + right_camera_object_detection_node = Node( + package="camera_object_detection", + executable="camera_object_detection_node", + name="right_camera_object_detection_node", + parameters=[config], + ) + + return LaunchDescription( + [ + left_camera_object_detection_node, + center_camera_object_detection_node, + right_camera_object_detection_node, + ] + ) diff --git a/src/perception/camera_object_detection/launch/sim_launch.py b/src/perception/camera_object_detection/launch/include/traffic_light.launch.py similarity index 71% rename from src/perception/camera_object_detection/launch/sim_launch.py rename to src/perception/camera_object_detection/launch/include/traffic_light.launch.py index 12af5eb0..234844b8 100755 --- a/src/perception/camera_object_detection/launch/sim_launch.py +++ b/src/perception/camera_object_detection/launch/include/traffic_light.launch.py @@ -5,22 +5,17 @@ def generate_launch_description(): - ld = 
LaunchDescription() config = os.path.join( get_package_share_directory('camera_object_detection'), 'config', - 'sim_config.yaml' + 'traffic_light_config.yaml' ) - # nodes camera_object_detection_node = Node( package='camera_object_detection', executable='camera_object_detection_node', - name='camera_object_detection_node', + name='traffic_light_node', parameters=[config] ) - # finalize - ld.add_action(camera_object_detection_node) - - return ld + return LaunchDescription([camera_object_detection_node]) diff --git a/src/perception/camera_object_detection/launch/nuscenes_launch.py b/src/perception/camera_object_detection/launch/nuscenes.launch.py similarity index 87% rename from src/perception/camera_object_detection/launch/nuscenes_launch.py rename to src/perception/camera_object_detection/launch/nuscenes.launch.py index 7a366260..faff24b6 100755 --- a/src/perception/camera_object_detection/launch/nuscenes_launch.py +++ b/src/perception/camera_object_detection/launch/nuscenes.launch.py @@ -12,7 +12,6 @@ def generate_launch_description(): 'nuscenes_config.yaml' ) - # nodes camera_object_detection_node = Node( package='camera_object_detection', executable='camera_object_detection_node', @@ -21,7 +20,4 @@ def generate_launch_description(): arguments=['--ros-args', '--log-level', 'info'] ) - # finalize - ld.add_action(camera_object_detection_node) - - return ld + return LaunchDescription([camera_object_detection_node]) diff --git a/src/perception/camera_object_detection/setup.py b/src/perception/camera_object_detection/setup.py index 7ba35c15..c9665f79 100755 --- a/src/perception/camera_object_detection/setup.py +++ b/src/perception/camera_object_detection/setup.py @@ -12,7 +12,8 @@ ('share/ament_index/resource_index/packages', ['resource/' + package_name]), ('share/' + package_name, ['package.xml']), - (os.path.join('share', package_name, 'launch'), glob('launch/*.py')), + (os.path.join('share', package_name, 'launch'), glob('launch/*.launch.py')), + 
(os.path.join('share', package_name, 'launch', 'include'), glob('launch/include/*.launch.py')), (os.path.join('share', package_name, 'config'), glob('config/*.yaml')), ], install_requires=['setuptools'], diff --git a/src/simulation/carla_config/CMakeLists.txt b/src/simulation/carla_config/CMakeLists.txt new file mode 100644 index 00000000..deba4f20 --- /dev/null +++ b/src/simulation/carla_config/CMakeLists.txt @@ -0,0 +1,63 @@ +cmake_minimum_required(VERSION 3.5) +project(carla_config) + +# Default to C99 +if(NOT CMAKE_C_STANDARD) + set(CMAKE_C_STANDARD 99) +endif() + +# Default to C++14 +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 14) +endif() + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +# find dependencies +find_package(ament_cmake REQUIRED) +find_package(rclcpp REQUIRED) +find_package(path_planning_msgs REQUIRED) +find_package(embedded_msgs REQUIRED) + +# Create ROS2 node executable from source files +add_executable(carla_mpc_bridge src/carla_mpc_bridge.cpp) + +if(BUILD_TESTING) + # Search for dependencies required for building tests + linting + find_package(ament_cmake_gtest REQUIRED) + + # Create library to link cpp file with test + add_library(mpc_bridge_lib + src/carla_mpc_bridge.cpp) + + # Add ROS2 dependencies required by package + ament_target_dependencies(mpc_bridge_lib rclcpp path_planning_msgs embedded_msgs) + + # Create test executable from source files + ament_add_gtest(mpc_bridge_test test/mpc_bridge_test.cpp) + # Link to dependencies + target_link_libraries(mpc_bridge_test mpc_bridge_lib) + + # Copy executable to installation location + install(TARGETS + mpc_bridge_test + DESTINATION lib/${PROJECT_NAME}) +endif() + +# Link with dependencies +ament_target_dependencies(carla_mpc_bridge rclcpp path_planning_msgs embedded_msgs) + +# Copy executable to installation location +install(TARGETS + carla_mpc_bridge + DESTINATION lib/${PROJECT_NAME}) + +# Copy 
launch and config files to installation location +install(DIRECTORY + config + launch + DESTINATION share/${PROJECT_NAME}) + +ament_package() \ No newline at end of file diff --git a/src/simulation/carla_config/config/carla_settings.yaml b/src/simulation/carla_config/config/carla_settings.yaml new file mode 100644 index 00000000..d002d01c --- /dev/null +++ b/src/simulation/carla_config/config/carla_settings.yaml @@ -0,0 +1,28 @@ +use_sim_time: true +carla: + # the network connection for the python connection to CARLA, can be overwritten by launch parameters + # don't make anything other than the host setting reference an environment variable + host: $(optenv CARLA_HOSTNAME localhost) + port: 2000 + timeout: 20 + # enable/disable synchronous mode. If enabled ros-bridge waits until + # expected data is received for all sensors + synchronous_mode: true + # within synchronous mode: wait for a vehicle control command before next tick? + synchronous_mode_wait_for_vehicle_control_command: false + # set the fixed timestep length + fixed_delta_seconds: 0.05 + # town + # options: + # Town05, mcity + town: Town10HD_Opt + # configuration values for the ego vehicle + ego_vehicle: + # the role name of the vehicles that acts as ego vehicle for this ros bridge instance + # Only the vehicles within this list are controllable from within ROS. 
+ # (the vehicle from CARLA is selected which has the attribute 'role_name' set to this value) + role_name: ego + ackermann_control: + # override the default values of the pid speed controller + # (only relevant for ackermann control mode) + control_loop_rate: 0.05 \ No newline at end of file diff --git a/src/simulation/carla_config/config/mpc_bridge_config.yaml b/src/simulation/carla_config/config/mpc_bridge_config.yaml new file mode 100644 index 00000000..93bf14b8 --- /dev/null +++ b/src/simulation/carla_config/config/mpc_bridge_config.yaml @@ -0,0 +1,4 @@ +mpc_bridge_node: + ros__parameters: + mpc_output_topic: /mpc_output + steering_publisher_topic: /steering_data \ No newline at end of file diff --git a/src/simulation/carla_config/config/objects.json b/src/simulation/carla_config/config/objects.json new file mode 100644 index 00000000..42280919 --- /dev/null +++ b/src/simulation/carla_config/config/objects.json @@ -0,0 +1,280 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_right", + "spawn_point": {"x": 0.304, "y": 0.345, "z": 1.723, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 1920, + "image_size_y": 1200, + "fov": 83, + "sensor_tick": 0.05, + "gamma": 2.2, + "shutter_speed": 200.0, + "iso": 100.0, + "fstop": 1.2, + "min_fstop": 1.2, + "blade_count": 5, + "exposure_mode": "histogram", + "exposure_compensation": 0.0, + "exposure_min_bright": 7.0, + "exposure_max_bright": 9.0, + "exposure_speed_up": 3.0, + "exposure_speed_down": 1.0, + "calibration_constant": 16.0, + "focal_distance": 1085, + "blur_amount": 1.0, + "blur_radius":
0.0, + "motion_blur_intensity": 0.45, + "motion_blur_max_distortion": 0.35, + "motion_blur_min_object_screen_size": 0.1, + "slope": 0.88, + "toe": 0.55, + "shoulder": 0.26, + "black_clip": 0.0, + "white_clip": 0.04, + "temp": 6500.0, + "tint": 0.0, + "chromatic_aberration_intensity": 0.0, + "chromatic_aberration_offset": 0.0, + "enable_postprocess_effects": "True", + "lens_circle_falloff": 5.0, + "lens_circle_multiplier": 0.0, + "lens_k": -1.0, + "lens_kcube": 0.0, + "lens_x_size": 0.08, + "lens_y_size": 0.08 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "right_seg", + "spawn_point": {"x": 0.304, "y": 0.345, "z": 1.723, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 1920, + "image_size_y": 1200, + "fov": 83, + "sensor_tick": 0.05, + "lens_circle_falloff": 5.0, + "lens_circle_multiplier": 0.0, + "lens_k": -1.0, + "lens_kcube": 0.0, + "lens_x_size": 0.08, + "lens_y_size": 0.08 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_center", + "spawn_point": {"x": 0.304, "y": 0.0, "z": 1.617, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 1920, + "image_size_y": 1200, + "fov": 120, + "sensor_tick": 0.05, + "gamma": 2.2, + "shutter_speed": 60.0, + "iso": 1200.0, + "fstop": 1.2, + "min_fstop": 1.2, + "blade_count": 5, + "exposure_mode": "histogram", + "exposure_compensation": 0.0, + "exposure_min_bright": 7.0, + "exposure_max_bright": 9.0, + "exposure_speed_up": 3.0, + "exposure_speed_down": 1.0, + "calibration_constant": 16.0, + "focal_distance": 1085, + "blur_amount": 1.0, + "blur_radius": 0.0, + "motion_blur_intensity": 0.45, + "motion_blur_max_distortion": 0.35, + "motion_blur_min_object_screen_size": 0.1, + "slope": 0.88, + "toe": 0.55, + "shoulder": 0.26, + "black_clip": 0.0, + "white_clip": 0.04, + "temp": 6500.0, + "tint": 0.0, + "chromatic_aberration_intensity": 0.0, + "chromatic_aberration_offset": 0.0, + "enable_postprocess_effects": "True", + "lens_circle_falloff": 5.0, + "lens_circle_multiplier": 0.0, + "lens_k": -1.0, 
+ "lens_kcube": 0.0, + "lens_x_size": 0.08, + "lens_y_size": 0.08 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_left", + "spawn_point": {"x": 0.304, "y": -0.345, "z": 1.723, "roll": 0.0, "pitch": 0.0, "yaw": -20.0}, + "image_size_x": 1920, + "image_size_y": 1200, + "fov": 83, + "sensor_tick": 0.05, + "gamma": 2.2, + "shutter_speed": 200.0, + "iso": 100.0, + "fstop": 1.2, + "min_fstop": 1.2, + "blade_count": 5, + "exposure_mode": "histogram", + "exposure_compensation": 0.0, + "exposure_min_bright": 7.0, + "exposure_max_bright": 9.0, + "exposure_speed_up": 3.0, + "exposure_speed_down": 1.0, + "calibration_constant": 16.0, + "focal_distance": 1085, + "blur_amount": 1.0, + "blur_radius": 0.0, + "motion_blur_intensity": 0.45, + "motion_blur_max_distortion": 0.35, + "motion_blur_min_object_screen_size": 0.1, + "slope": 0.88, + "toe": 0.55, + "shoulder": 0.26, + "black_clip": 0.0, + "white_clip": 0.04, + "temp": 6500.0, + "tint": 0.0, + "chromatic_aberration_intensity": 0.0, + "chromatic_aberration_offset": 0.0, + "enable_postprocess_effects": "True", + "lens_circle_falloff": 5.0, + "lens_circle_multiplier": 0.0, + "lens_k": -1.0, + "lens_kcube": 0.0, + "lens_x_size": 0.08, + "lens_y_size": 0.08 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "sensor_tick": 0.05, + "gamma": 2.2, + "shutter_speed": 200.0, + "iso": 100.0, + "fstop": 8.0, + "min_fstop": 1.2, + "blade_count": 5, + "exposure_mode": "histogram", + "exposure_compensation": 0.0, + "exposure_min_bright": 7.0, + "exposure_max_bright": 9.0, + "exposure_speed_up": 3.0, + "exposure_speed_down": 1.0, + "calibration_constant": 16.0, + "focal_distance": 1000.0, + "blur_amount": 1.0, + "blur_radius": 0.0, + "motion_blur_intensity": 0.45, + "motion_blur_max_distortion": 0.35, + "motion_blur_min_object_screen_size": 0.1, + "slope": 0.88, + "toe": 0.55, + 
"shoulder": 0.26, + "black_clip": 0.0, + "white_clip": 0.04, + "temp": 6500.0, + "tint": 0.0, + "chromatic_aberration_intensity": 0.0, + "chromatic_aberration_offset": 0.0, + "enable_postprocess_effects": "True", + "lens_circle_falloff": 5.0, + "lens_circle_multiplier": 0.0, + "lens_k": -1.0, + "lens_kcube": 0.0, + "lens_x_size": 0.08, + "lens_y_size": 0.08, + "bloom_intensity": 0.675, + "lens_flare_intensity": 0.1 + }, + { + "type": "sensor.lidar.ray_cast", + "id": "center_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.03, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 100, + "channels": 32, + "points_per_second": 1200000, + "upper_fov": 15, + "lower_fov": -25, + "rotation_frequency": 20, + "sensor_tick": 0.05 + }, + { + "type": "sensor.other.radar", + "id": "front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 1.5, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + "range": 100.0 + }, + { + "type": "sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": "sensor.other.collision", + "id": "collision1", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "laneinvasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + } + + ] + } + ] +} \ No 
newline at end of file diff --git a/src/simulation/carla_config/launch/carla.launch.py b/src/simulation/carla_config/launch/carla.launch.py new file mode 100644 index 00000000..888ef992 --- /dev/null +++ b/src/simulation/carla_config/launch/carla.launch.py @@ -0,0 +1,175 @@ +from launch import LaunchDescription +from launch_ros.actions import Node + +# For creating launch arguments +from launch.actions import DeclareLaunchArgument +from launch.substitutions import TextSubstitution, LaunchConfiguration +from launch.conditions import UnlessCondition + +# To load params from yaml file +import os +import yaml +from ament_index_python.packages import get_package_share_directory + +# For using other launch files +from launch.actions import IncludeLaunchDescription +from launch.launch_description_sources import PythonLaunchDescriptionSource + +# Use the second param (the launch argument) unless it is empty + + +def CheckParam(param1, param2): + try: + return param2 + except NameError: + return param1 + + +def generate_launch_description(): + """ Get file path of yaml file """ + config_file_path = os.path.join( + get_package_share_directory('carla_config'), + 'config', + 'carla_settings.yaml' + ) + mpc_bridge_confile_file_path = os.path.join( + get_package_share_directory('carla_config'), + 'config', + 'mpc_bridge_config.yaml' + ) + + """ Load params from yaml file """ + with open(config_file_path, 'r') as config_file: + params = yaml.safe_load(config_file) + with open(mpc_bridge_confile_file_path, 'r') as config_file: + mpc_bridge_config = yaml.safe_load(config_file) + + """ Get hostname from yaml file """ + # Checking if the hostname provided in the yaml file is referencing an env + # variable + if '$' in params['carla']['host']: + hostname = os.environ.get( + params['carla']['host'].split()[1], + params['carla']['host'].split()[2]) + else: + hostname = params['carla']['host'] + + """ Declare launch arguments """ + # carla-ros-bridge args: See carla_settings.yaml for 
default arg values + host_arg = DeclareLaunchArgument('host', default_value=hostname) + port_arg = DeclareLaunchArgument( + 'port', default_value=str( + params['carla']['port'])) + timeout_arg = DeclareLaunchArgument( + 'timeout', default_value=str( + params['carla']['timeout'])) + sync_mode_arg = DeclareLaunchArgument( + 'synchronous_mode', default_value=str( + params['carla']['synchronous_mode'])) + sync_mode_wait_arg = DeclareLaunchArgument( + 'synchronous_mode_wait_for_vehicle_control_command', default_value=str( + params['carla']['synchronous_mode_wait_for_vehicle_control_command'])) + fixed_delta_arg = DeclareLaunchArgument( + 'fixed_delta_seconds', default_value=str( + params['carla']['fixed_delta_seconds'])) + town_arg = DeclareLaunchArgument( + 'town', default_value=params['carla']['town']) + + # Carla ego vehicle args + objects_definition_arg = DeclareLaunchArgument( + 'objects_definition_file', + default_value=str( + get_package_share_directory('carla_config') + + '/config/objects.json')) + role_name_arg = DeclareLaunchArgument( + 'role_name', default_value=params['carla']['ego_vehicle']['role_name']) + spawn_point_arg = DeclareLaunchArgument( + 'spawn_point', default_value='None') + spawn_sensors_only_arg = DeclareLaunchArgument( + 'spawn_sensors_only', default_value='False') + + # Ackermann control args + control_loop_rate_arg = DeclareLaunchArgument( + 'control_loop_rate', default_value=str( + params['carla']['ackermann_control']['control_loop_rate'])) + + """Launch carla ros bridge node.""" + carla_ros_bridge = Node( + package='carla_ros_bridge', + executable='bridge', + output='screen', + parameters=[{ + 'host': LaunchConfiguration('host'), + 'port': LaunchConfiguration('port'), + 'timeout': LaunchConfiguration('timeout'), + 'synchronous_mode': LaunchConfiguration('synchronous_mode'), + 'synchronous_mode_wait_for_vehicle_control_command': LaunchConfiguration('synchronous_mode_wait_for_vehicle_control_command'), + 'fixed_delta_seconds': 
LaunchConfiguration('fixed_delta_seconds'), + 'town': LaunchConfiguration('town'), + }], + respawn=True + ) + + """Launch ego vehicle spawner nodes (using carla_example_ego_vehicle.launch.py)""" + carla_ego_vehicle = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + os.path.join( + get_package_share_directory('carla_spawn_objects'), + 'carla_example_ego_vehicle.launch.py')), + launch_arguments={ + 'objects_definition_file': LaunchConfiguration('objects_definition_file'), + 'role_name': LaunchConfiguration('role_name'), + 'spawn_point_ego_vehicle': LaunchConfiguration('spawn_point'), + 'spawn_sensors_only': LaunchConfiguration('spawn_sensors_only')}.items()) + + if (os.environ.get('USE_ACKERMANN_CONTROL', 'True').lower() == 'true'): + """ Launch Ackermann Control Node """ + carla_control = Node( + package='carla_ackermann_control', + executable='carla_ackermann_control_node', + output='screen', + parameters=[{ + 'role_name': LaunchConfiguration('role_name'), + 'control_loop_rate': LaunchConfiguration('control_loop_rate') + }] + ) + else: + carla_control = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + os.path.join(get_package_share_directory( + 'carla_manual_control'), 'carla_manual_control.launch.py') + ), + launch_arguments={ + 'role_name': LaunchConfiguration('role_name') + }.items() + ) + + """ Launch MPC Bridge Node """ + carla_mpc_bridge = Node( + package='carla_config', + executable='carla_mpc_bridge', + parameters=[{ + 'mpc_output_topic': mpc_bridge_config['mpc_bridge_node']['ros_parameters']['mpc_output_topic'], + 'steering_publisher_topic': mpc_bridge_config['mpc_bridge_node']['ros_parameters']['steering_publisher_topic'] + }], + output='screen' + ) + + return LaunchDescription([ + host_arg, + port_arg, + timeout_arg, + sync_mode_arg, + sync_mode_wait_arg, + fixed_delta_arg, + town_arg, + objects_definition_arg, + role_name_arg, + spawn_point_arg, + spawn_sensors_only_arg, + control_loop_rate_arg, + carla_ros_bridge, + 
carla_ego_vehicle, + carla_control, + carla_mpc_bridge, + ]) diff --git a/src/simulation/carla_config/package.xml b/src/simulation/carla_config/package.xml new file mode 100644 index 00000000..9ea6c3b5 --- /dev/null +++ b/src/simulation/carla_config/package.xml @@ -0,0 +1,20 @@ + + + carla_config + 0.0.0 + Contains launch files, scenario scripts, and custom carla bridge nodes + + Vishal Jayakumar + + TODO: License declaration + + rclcpp + path_planning_msgs + embedded_msgs + + ament_cmake + + + ament_cmake + + \ No newline at end of file diff --git a/src/simulation/carla_config/src/carla_mpc_bridge.cpp b/src/simulation/carla_config/src/carla_mpc_bridge.cpp new file mode 100644 index 00000000..dce4b3bf --- /dev/null +++ b/src/simulation/carla_config/src/carla_mpc_bridge.cpp @@ -0,0 +1,109 @@ +#include "embedded_msgs/msg/steering_angle_can.hpp" +#include "path_planning_msgs/msg/ackermann_drive.hpp" +#include "path_planning_msgs/msg/carla_ego_vehicle_status.hpp" +#include "path_planning_msgs/msg/environment.hpp" +#include "path_planning_msgs/msg/mpc_output.hpp" +#include "rclcpp/rclcpp.hpp" +using std::placeholders::_1; + +class MPC_Bridge_Node : public rclcpp::Node { + // offset for the rosbag + double xOff = 0; + double yOff = 0; + + rclcpp::Publisher::SharedPtr steeringPub; + rclcpp::Publisher::SharedPtr carlaPub; + + public: + MPC_Bridge_Node() : Node("carla_mpc_bridge") { + // Declare Parameters + this->declare_parameter("mpc_output_topic", "/mpc_output"); + this->declare_parameter("steering_publisher_topic", "/steering_data"); + + // Get Parameters + std::string mpc_output_topic = this->get_parameter("mpc_output_topic").as_string(); + std::string steering_publisher_topic = + this->get_parameter("steering_publisher_topic").as_string(); + + // Wait for service + auto parameters_client = + std::make_shared(this, "set_initial_pose"); + while (!parameters_client->wait_for_service(std::chrono::seconds(1))) { + if (!rclcpp::ok()) { + RCLCPP_ERROR(this->get_logger(), 
"Interrupted while waiting for the service. Exiting."); + return; + } + RCLCPP_INFO(this->get_logger(), "service not available, waiting again..."); + } + + // Check if role name specified + std::string role_name; + std::string param_name = "role_name"; + do { + role_name = parameters_client->get_parameters({"role_name"})[0].get_value(); + std::this_thread::sleep_for(std::chrono::seconds(1)); + } while (role_name == ""); + + // Subscribe to vehicle status + rclcpp::Subscription::SharedPtr + steeringAngleSub; + steeringAngleSub = this->create_subscription( + "/carla/" + role_name + "/vehicle_status", 2, + std::bind(&MPC_Bridge_Node::publish_steering, this, _1)); + + // Subscribe to MPC Output + rclcpp::Subscription::SharedPtr mpcSub; + mpcSub = this->create_subscription( + mpc_output_topic, 2, std::bind(&MPC_Bridge_Node::mpc_to_carla, this, _1)); + + rclcpp::Subscription::SharedPtr envSub; + envSub = this->create_subscription( + "/path_planning/environment", 2, std::bind(&MPC_Bridge_Node::env_callback, this, _1)); + + this->steeringPub = + this->create_publisher(steering_publisher_topic, 1); + this->carlaPub = this->create_publisher( + "/carla/" + role_name + "/ackermann_cmd", 1); + } + + private: + void publish_steering(path_planning_msgs::msg::CarlaEgoVehicleStatus::SharedPtr vehicle) { + // steer is between -1 and 1, max steer is 70 degrees (1.22173rad), so + // multiply steer by rad to get approximate steer rwa = + // (steer.steering_angle * 0.0629 - 0.1363) * 0.0174533; + auto steer = embedded_msgs::msg::SteeringAngleCAN(); + steer.steering_angle = (vehicle->control.steer * -70 + 0.1363) / 0.0629; + this->steeringPub->publish(steer); + } + + void env_callback(path_planning_msgs::msg::Environment::SharedPtr env) { + static bool receivedEnv = false; + + if (!receivedEnv) { + receivedEnv = true; + this->xOff = env->global_pose.position.x; + this->yOff = env->global_pose.position.y; + } + } + + void mpc_to_carla(path_planning_msgs::msg::MPCOutput::SharedPtr 
mpcOutput) { + auto carlaControl = path_planning_msgs::msg::AckermannDrive(); + carlaControl.steering_angle = mpcOutput->steer; + carlaControl.speed = mpcOutput->accel > 0 ? mpcOutput->accel : 0; + this->carlaPub->publish(carlaControl); + } +}; + +int main(int argc, char **argv) { + // Initialize ROS2 + rclcpp::init(argc, argv); + + // Create Node + auto node = std::make_shared(); + + rclcpp::spin(node); + + rclcpp::shutdown(); + + return 0; +} \ No newline at end of file diff --git a/src/simulation/carla_config/test/mpc_bridge_test.cpp b/src/simulation/carla_config/test/mpc_bridge_test.cpp new file mode 100644 index 00000000..e69de29b diff --git a/src/simulation/carla_notebooks/.gitignore b/src/simulation/carla_notebooks/.gitignore new file mode 100644 index 00000000..30992009 --- /dev/null +++ b/src/simulation/carla_notebooks/.gitignore @@ -0,0 +1,2 @@ +.ipynb_checkpoints +__pycache__ \ No newline at end of file diff --git a/src/simulation/carla_notebooks/sample_notebook.ipynb b/src/simulation/carla_notebooks/sample_notebook.ipynb new file mode 100644 index 00000000..1e270af5 --- /dev/null +++ b/src/simulation/carla_notebooks/sample_notebook.ipynb @@ -0,0 +1,74 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setup Code\n", + "**DO NOT CHANGE THIS!!**\n", + "**RUN THE CELL BELOW FIRST!!**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import carla\n", + "import os\n", + "\n", + "client_name = os.environ.get(\"CLIENT_NAME\", \"DOES NOT EXIST\")\n", + "if client_name == \"DOES NOT EXIST\":\n", + " raise Exception(\"The environment variable for the container name of the carla server has not been set\")\n", + "\n", + "# Connect to the client and retrieve the world object\n", + "client = carla.Client(client_name, 2000)\n", + "world = client.get_world()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ 
+ "# Start Adding Code or Markdown Cells Below\n", + "Some helpful code cells are provided below that you may choose to use" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#### SPAWN VEHICLES ####\n", + "\"\"\"\n", + "# Get the blueprint library and filter for the vehicle blueprints\n", + "vehicle_blueprints = world.get_blueprint_library().filter('*vehicle*')\n", + "# Get the map's spawn points\n", + "spawn_points = world.get_map().get_spawn_points()\n", + "\n", + "# Spawn 50 vehicles randomly distributed throughout the map \n", + "# for each spawn point, we choose a random vehicle from the blueprint library\n", + "for i in range(0,50):\n", + " world.try_spawn_actor(random.choice(vehicle_blueprints), random.choice(spawn_points))\n", + "\"\"\"\n", + "#### SET ALL VEHICLES TO AUTOPILOT\n", + "\"\"\"\n", + "for vehicle in world.get_actors().filter('*vehicle*'):\n", + " vehicle.set_autopilot(True)\n", + "\"\"\"" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/src/simulation/carla_notebooks/scenarios/custom_scenarios.ipynb b/src/simulation/carla_notebooks/scenarios/custom_scenarios.ipynb new file mode 100644 index 00000000..d89d2407 --- /dev/null +++ b/src/simulation/carla_notebooks/scenarios/custom_scenarios.ipynb @@ -0,0 +1,20 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python /home/docker/scenario_runner/scenario_runner.py --host $CLIENT_NAME --configFile /home/docker/carla_notebooks/scenarios/custom_scenarios.xml --scenario OtherLeadingVehicle_Custom --waitForEgo" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/src/simulation/carla_notebooks/scenarios/custom_scenarios.xml 
b/src/simulation/carla_notebooks/scenarios/custom_scenarios.xml new file mode 100644 index 00000000..5514d042 --- /dev/null +++ b/src/simulation/carla_notebooks/scenarios/custom_scenarios.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/src/simulation/carla_sample_node/.gitignore b/src/simulation/carla_sample_node/.gitignore new file mode 100644 index 00000000..cd3d2253 --- /dev/null +++ b/src/simulation/carla_sample_node/.gitignore @@ -0,0 +1 @@ +logs \ No newline at end of file diff --git a/src/simulation/carla_sample_node/carla_sample_node/__init__.py b/src/simulation/carla_sample_node/carla_sample_node/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/simulation/carla_sample_node/carla_sample_node/carla_sample_node.py b/src/simulation/carla_sample_node/carla_sample_node/carla_sample_node.py new file mode 100644 index 00000000..37f8e660 --- /dev/null +++ b/src/simulation/carla_sample_node/carla_sample_node/carla_sample_node.py @@ -0,0 +1,56 @@ +import rclpy +from rclpy.node import Node + +from nav_msgs.msg import Odometry # For odometry +from sensor_msgs.msg import NavSatFix # For GNSS + +from std_msgs.msg import Bool + +import os + + +class Datalogger(Node): + def __init__(self): + super().__init__('datalogger') + self.declare_parameter("publish_autopilot", False) + self.gnssSubscription = self.create_subscription( + NavSatFix, + '/carla/ego_vehicle/gnss', + self.gnss_callback, + 10 + ) + if self.get_parameter("publish_autopilot").value: + self.autopilotPublisher = self.create_publisher( + Bool, + '/carla/ego_vehicle/enable_autopilot', + 10 + ) + # Publish autopilot message every 10 seconds + self.timer = self.create_timer(10, self.timer_callback) + + def gnss_callback(self, msg): + # with open("/home/docker/ament_ws/src/carla_sample_node/logs/gnss_" + self.containerId + ".txt", 'a+') as file: + # file.write(str(msg.latitude) + ", " + str(msg.longitude) + "\n") + self.get_logger().info(str(msg.latitude) + 
", " + + str(msg.longitude)) # print to screen + + def timer_callback(self): + msg = Bool() + msg.data = True + self.autopilotPublisher.publish(msg) + + +def main(args=None): + rclpy.init(args=args) + + datalogger = Datalogger() + + rclpy.spin(datalogger) + + datalogger.destroy_node() + rclpy.shutdown() + return + + +if __name__ == '__main__': + main() diff --git a/src/simulation/carla_sample_node/launch/carla_sample_node.launch.py b/src/simulation/carla_sample_node/launch/carla_sample_node.launch.py new file mode 100644 index 00000000..45a077bb --- /dev/null +++ b/src/simulation/carla_sample_node/launch/carla_sample_node.launch.py @@ -0,0 +1,25 @@ +from launch import LaunchDescription +import launch_ros.actions + +# For creating launch arguments +from launch.actions import DeclareLaunchArgument +from launch.substitutions import LaunchConfiguration + + +def generate_launch_description(): + publish_autopilot_arg = DeclareLaunchArgument( + 'publish_autopilot', + default_value='False' + ) + + return LaunchDescription( + [ + launch_ros.actions.Node( + namespace="carla_sample_node", + package='carla_sample_node', + executable='carla_sample_node', + parameters=[{ + 'publish_autopilot': LaunchConfiguration('publish_autopilot') + }], + output='screen') + ]) diff --git a/src/simulation/carla_sample_node/package.xml b/src/simulation/carla_sample_node/package.xml new file mode 100644 index 00000000..7e3d0b25 --- /dev/null +++ b/src/simulation/carla_sample_node/package.xml @@ -0,0 +1,20 @@ + + + + carla_sample_node + 0.0.0 + Sample python node to read data from CARLA + Vishal Jayakumar + TODO: License declaration + + ament_copyright + ament_flake8 + ament_pep257 + python3-pytest + + rclpy + + + ament_python + + \ No newline at end of file diff --git a/src/simulation/carla_sample_node/resource/carla_sample_node b/src/simulation/carla_sample_node/resource/carla_sample_node new file mode 100644 index 00000000..e69de29b diff --git a/src/simulation/carla_sample_node/setup.cfg 
b/src/simulation/carla_sample_node/setup.cfg new file mode 100644 index 00000000..e9a26297 --- /dev/null +++ b/src/simulation/carla_sample_node/setup.cfg @@ -0,0 +1,4 @@ +[develop] +script_dir=$base/lib/carla_sample_node +[install] +install_scripts=$base/lib/carla_sample_node \ No newline at end of file diff --git a/src/simulation/carla_sample_node/setup.py b/src/simulation/carla_sample_node/setup.py new file mode 100644 index 00000000..18127651 --- /dev/null +++ b/src/simulation/carla_sample_node/setup.py @@ -0,0 +1,29 @@ +from setuptools import setup +from glob import glob +import os + +package_name = 'carla_sample_node' + +setup( + name=package_name, + version='0.0.0', + packages=[package_name], + data_files=[ + ('share/ament_index/resource_index/packages', + ['resource/' + package_name]), + ('share/' + package_name, ['package.xml']), + ('share/' + package_name, glob(os.path.join('launch', '*.launch.py'))) + ], + install_requires=['setuptools'], + zip_safe=True, + maintainer='Vishal Jayakumar', + maintainer_email='v3jayaku@watonomous.ca', + description='Sample python node to read data from CARLA', + license='TODO: License declaration', + tests_require=['pytest'], + entry_points={ + 'console_scripts': [ + 'carla_sample_node = carla_sample_node.carla_sample_node:main' + ], + }, +) diff --git a/src/wato_msgs/simulation/embedded_msgs/CMakeLists.txt b/src/wato_msgs/simulation/embedded_msgs/CMakeLists.txt new file mode 100644 index 00000000..2b721e39 --- /dev/null +++ b/src/wato_msgs/simulation/embedded_msgs/CMakeLists.txt @@ -0,0 +1,25 @@ +cmake_minimum_required(VERSION 3.10) +project(embedded_msgs) + +# Set compiler to use C++ 17 standard +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 17) +endif() + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +# Search for dependencies required for building this package +find_package(ament_cmake REQUIRED) # ROS2 build tool 
+find_package(std_msgs REQUIRED) # ROS2 common messages + +set(msg_files + msg/SteeringAngleCAN.msg) +# Parses message files to generate ROS2 interfaces +rosidl_generate_interfaces(embedded_msgs + ${msg_files} + DEPENDENCIES std_msgs) + +ament_export_dependencies(rosidl_default_runtime) +ament_package() \ No newline at end of file diff --git a/src/wato_msgs/simulation/embedded_msgs/msg/SteeringAngleCAN.msg b/src/wato_msgs/simulation/embedded_msgs/msg/SteeringAngleCAN.msg new file mode 100644 index 00000000..e274f9d0 --- /dev/null +++ b/src/wato_msgs/simulation/embedded_msgs/msg/SteeringAngleCAN.msg @@ -0,0 +1,4 @@ +float64 steering_angle +float64 steering_gradient +int8 steering_availability_status +bool driver_steering_interference_detected \ No newline at end of file diff --git a/src/wato_msgs/simulation/embedded_msgs/package.xml b/src/wato_msgs/simulation/embedded_msgs/package.xml new file mode 100644 index 00000000..aa63197f --- /dev/null +++ b/src/wato_msgs/simulation/embedded_msgs/package.xml @@ -0,0 +1,20 @@ + + + embedded_msgs + 0.0.0 + Embedded ROS2 Messages + + Vishal Jayakumar + TODO + + ament_cmake + std_msgs + + rosidl_default_generators + rosidl_default_runtime + rosidl_interface_packages + + + ament_cmake + + \ No newline at end of file diff --git a/src/wato_msgs/simulation/path_planning_msgs/CMakeLists.txt b/src/wato_msgs/simulation/path_planning_msgs/CMakeLists.txt new file mode 100644 index 00000000..8fd9b59e --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/CMakeLists.txt @@ -0,0 +1,30 @@ +cmake_minimum_required(VERSION 3.10) +project(path_planning_msgs) + +# Set compiler to use C++ 17 standard +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 17) +endif() + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +# Search for dependencies required for building this package +find_package(ament_cmake REQUIRED) # ROS2 build tool +find_package(std_msgs 
REQUIRED) # ROS2 common messages +find_package(geometry_msgs REQUIRED) + +set(msg_files + msg/CarlaEgoVehicleStatus.msg + msg/CarlaEgoVehicleControl.msg + msg/Environment.msg + msg/MPCOutput.msg + msg/AckermannDrive.msg) +# Parses message files to generate ROS2 interfaces +rosidl_generate_interfaces(path_planning_msgs + ${msg_files} + DEPENDENCIES std_msgs geometry_msgs) + +ament_export_dependencies(rosidl_default_runtime) +ament_package() \ No newline at end of file diff --git a/src/wato_msgs/simulation/path_planning_msgs/msg/AckermannDrive.msg b/src/wato_msgs/simulation/path_planning_msgs/msg/AckermannDrive.msg new file mode 100644 index 00000000..ef4dcf89 --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/msg/AckermannDrive.msg @@ -0,0 +1,38 @@ +## Driving command for a car-like vehicle using Ackermann steering. +# $Id$ + +# Assumes Ackermann front-wheel steering. The left and right front +# wheels are generally at different angles. To simplify, the commanded +# angle corresponds to the yaw of a virtual wheel located at the +# center of the front axle, like on a tricycle. Positive yaw is to +# the left. (This is *not* the angle of the steering wheel inside the +# passenger compartment.) +# +# Zero steering angle velocity means change the steering angle as +# quickly as possible. Positive velocity indicates a desired absolute +# rate of change either left or right. The controller tries not to +# exceed this limit in either direction, but sometimes it might. +# +float32 steering_angle # desired virtual angle (radians) +float32 steering_angle_velocity # desired rate of change (radians/s) + +# Drive at requested speed, acceleration and jerk (the 1st, 2nd and +# 3rd derivatives of position). All are measured at the vehicle's +# center of rotation, typically the center of the rear axle. The +# controller tries not to exceed these limits in either direction, but +# sometimes it might. +# +# Speed is the desired scalar magnitude of the velocity vector. 
+# Direction is forward unless the sign is negative, indicating reverse. +# +# Zero acceleration means change speed as quickly as +# possible. Positive acceleration indicates a desired absolute +# magnitude; that includes deceleration. +# +# Zero jerk means change acceleration as quickly as possible. Positive +# jerk indicates a desired absolute rate of acceleration change in +# either direction (increasing or decreasing). +# +float32 speed # desired forward speed (m/s) +float32 acceleration # desired acceleration (m/s^2) +float32 jerk # desired jerk (m/s^3) \ No newline at end of file diff --git a/src/wato_msgs/simulation/path_planning_msgs/msg/CarlaEgoVehicleControl.msg b/src/wato_msgs/simulation/path_planning_msgs/msg/CarlaEgoVehicleControl.msg new file mode 100644 index 00000000..0a69577f --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/msg/CarlaEgoVehicleControl.msg @@ -0,0 +1,32 @@ +# +# Copyright (c) 2018-2019 Intel Corporation. +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . +# +# This represents a vehicle control message sent to CARLA simulator + +std_msgs/Header header + +# The CARLA vehicle control data + +# 0. <= throttle <= 1. +float32 throttle + +# -1. <= steer <= 1. +float32 steer + +# 0. <= brake <= 1. +float32 brake + +# hand_brake 0 or 1 +bool hand_brake + +# reverse 0 or 1 +bool reverse + +# gear +int32 gear + +# manual gear shift +bool manual_gear_shift diff --git a/src/wato_msgs/simulation/path_planning_msgs/msg/CarlaEgoVehicleStatus.msg b/src/wato_msgs/simulation/path_planning_msgs/msg/CarlaEgoVehicleStatus.msg new file mode 100644 index 00000000..650e40e5 --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/msg/CarlaEgoVehicleStatus.msg @@ -0,0 +1,15 @@ +# +# Copyright (c) 2019 Intel Corporation. +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . 
+# + +std_msgs/Header header + +float32 velocity +geometry_msgs/Accel acceleration +geometry_msgs/Quaternion orientation + +# the current control values, as reported by Carla +CarlaEgoVehicleControl control diff --git a/src/wato_msgs/simulation/path_planning_msgs/msg/Environment.msg b/src/wato_msgs/simulation/path_planning_msgs/msg/Environment.msg new file mode 100644 index 00000000..c63726db --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/msg/Environment.msg @@ -0,0 +1,9 @@ +std_msgs/Header header + +# Vehicle State +####################### + +float64 forward_speed # m/s +float64 forward_accel # m/s^2 +float64 wheel_angle # degrees (not sure if we want/need this) +geometry_msgs/Pose global_pose # in meters from where odometer was started up \ No newline at end of file diff --git a/src/wato_msgs/simulation/path_planning_msgs/msg/MPCOutput.msg b/src/wato_msgs/simulation/path_planning_msgs/msg/MPCOutput.msg new file mode 100644 index 00000000..4565da0a --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/msg/MPCOutput.msg @@ -0,0 +1,2 @@ +float64 accel +float64 steer \ No newline at end of file diff --git a/src/wato_msgs/simulation/path_planning_msgs/package.xml b/src/wato_msgs/simulation/path_planning_msgs/package.xml new file mode 100644 index 00000000..a34f4a27 --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/package.xml @@ -0,0 +1,21 @@ + + + path_planning_msgs + 0.0.0 + Path Planning ROS2 Messages + + Vishal Jayakumar + TODO + + ament_cmake + std_msgs + geometry_msgs + + rosidl_default_generators + rosidl_default_runtime + rosidl_interface_packages + + + ament_cmake + + \ No newline at end of file diff --git a/watod b/watod index f81630da..0b5d1f87 100755 --- a/watod +++ b/watod @@ -8,13 +8,8 @@ ANSIBLE_TMP_DIR=/tmp/ansible-tmp-$USER function usage { echo "Usage: $programname [OPTIONS]... [COMMAND]..." - echo "Executes docker compose COMMAND in the the monorepo with appropriate environment variables." 
- echo " launch_autonomous launch the can_interface for autonomous mode" - echo " -dev --devel configures containers for development (allows you to edit inside a container)" + echo "Executes \"docker compose COMMAND\" in the the monorepo with appropriate environment variables." echo " -v --verbose verbose mode. Currently, prints .env variables" - echo " -lp --local-ports displays ports exposed by applications and shows the" - echo " ssh local port forwarding commands for each VNC Server found" - echo " if a docker compose command is specified, it will be run in the background." echo " -t --terminal [service_name] open a bash terminal into the desired service (eg: perception)." echo "" echo "Examples:" @@ -53,18 +48,10 @@ COMPOSE_CMD="" while [[ $# -gt 0 ]] ; do key="$1" case $key in - -dev|--devel) - DEVEL=1 - shift - ;; -v|--verbose) VERBOSE=1 shift # past argument ;; - -lp|--local-ports) - PRINT_LOCAL_PORTS=1 - shift - ;; -t|--terminal) START_TERMINAL=1 shift @@ -88,55 +75,28 @@ if [[ $# -gt 0 ]]; then COMPOSE_CMD="${COMPOSE_CMD} $@" fi -if [ ! -z $DEVEL ]; then - echo "DEVELOPER MODE ACTIVATED: Source files have been mounted within /home/bolty when you enter the container" - MODULES_DIR="$MONO_DIR/modules/dev_overrides" - export MODULES_DIR_EXP="$MODULES_DIR" +# Allow for local overrides of any of the below parameters (prioritized your local config) +if [ -f "$MONO_DIR/watod-config.local.sh" ]; then + source "$MONO_DIR/watod-config.local.sh" +elif [ -f "$MONO_DIR/watod-config.sh" ]; then + source "$MONO_DIR/watod-config.sh" fi -# generate .env file from watod_scripts/watod-setup-env.sh -if [ ! 
-z $VERBOSE ]; then # if we want to see all verbose coming from setting up env - cd $MONO_DIR && bash watod_scripts/watod-setup-env.sh +# Change docker-compose override according to mode of operation +MODE_OF_OPERATION=${MODE_OF_OPERATION:-"deploy"} +if [ $MODE_OF_OPERATION == "deploy" ]; then + MODULES_DIR="$MONO_DIR/modules" +elif [ $MODE_OF_OPERATION == "develop" ]; then + MODULES_DIR="$MONO_DIR/modules/dev_overrides" else - cd $MONO_DIR && bash watod_scripts/watod-setup-env.sh &> /dev/null + MODULES_DIR="$MONO_DIR/modules" fi -if [ ! -z $PRINT_LOCAL_PORTS ]; then - # get list of services to traverse - SERVICE_LIST="$(run_compose ps --services | sort)" - - # fixes hostnames for VMs so that by default $HOSTNAME is the first element - # in the array, which often is the domain name that can be accessed - # externally - read -ra HOSTNAME <<< $(hostname -A) - echo "=========================================================================" - echo "Ports exposed by running containers:" - echo "=========================================================================" - LOCAL_FORWARD_ALL="" - - PORTS_STRING="" - for SERVICE in $SERVICE_LIST; do - ENV_VARS="$(run_compose exec "$SERVICE" env || true)" - VNC_PORT=$(echo "$ENV_VARS" | grep ^VNC_PORT= | cut -d '=' -f2) - VNC_PORT=${VNC_PORT:0:5} # Strip unncessary characters - - if [ ! 
-z $VNC_PORT ]; then - echo "$SERVICE service exposes a VNC Server at $HOSTNAME:$VNC_PORT" - - LOCAL_FORWARD="-L ${VNC_PORT}:localhost:${VNC_PORT}" - LOCAL_FORWARD_ALL="${LOCAL_FORWARD_ALL} $LOCAL_FORWARD" - PORTS_STRING="${PORTS_STRING},${VNC_PORT}:${SERVICE}_VNC" - echo " To forward it locally, run" - echo " ssh$LOCAL_FORWARD $USER@$HOSTNAME" - echo " on your local machine attach to VNC at localhost:${VNC_PORT}" - echo - fi - done - - echo "=========================================================================" - echo "To forward all ports locally:" - echo "ssh $LOCAL_FORWARD_ALL $USER@$HOSTNAME" - echo "=========================================================================" +# generate .env file from watod_scripts/watod-setup-env.sh +if [ ! -z $VERBOSE ]; then # if we want to see all verbose coming from setting up env + cd $MONO_DIR && . ./watod_scripts/watod-setup-env.sh +else + cd $MONO_DIR && . ./watod_scripts/watod-setup-env.sh &> /dev/null fi if [ ! -z "$COMPOSE_CMD" ]; then diff --git a/watod_scripts/watod-setup-env.sh b/watod_scripts/watod-setup-env.sh index 383b763c..e8b7227b 100755 --- a/watod_scripts/watod-setup-env.sh +++ b/watod_scripts/watod-setup-env.sh @@ -1,4 +1,5 @@ #!/bin/bash +set -e # This script generates a .env file to be used with docker-compose # To override any of the variables in this script, create watod-config.sh @@ -10,21 +11,7 @@ if [ -f /.dockerenv ]; then exit 1 fi -MONO_DIR="$(dirname "$(realpath "$0")")" -# moves us one level out to the root monorepo directory -MONO_DIR=${MONO_DIR%/*} - -if [ ! -z $MODULES_DIR_EXP ]; then - MODULES_DIR="$MODULES_DIR_EXP" -else - MODULES_DIR="$MONO_DIR/modules" -fi - -# Allow for local overrides of any of the below parameters -if [ -f "$MONO_DIR/watod-config.sh" ]; then - source "$MONO_DIR/watod-config.sh" -fi - +# Retrieve git branch if ! [ -x "$(command -v git)" ]; then echo 'Error: git is not installed.' 
>&2 else @@ -41,11 +28,6 @@ TAG=$(echo ${TAG:-$BRANCH} | tr / -) # replace / with - TAG=${TAG/\//-} -# Happens during CI, we use the TAG from CI -if [ ! -z $MODULES_DIR_EXP ]; then - TAG="build_$TAG" -fi - # List of active modules to run, defined in docker-compose.yaml. # Possible values: # - infrastructure : starts visualization tools (foxglove and/or vnc) @@ -87,7 +69,6 @@ INFRASTRUCTURE_FOXGLOVE_IMAGE=${DATA_STREAM_IMAGE:-"$REGISTRY_URL/infrastructure PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE=${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:-"$REGISTRY_URL/perception/radar_object_detection"} PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE=${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE:-"$REGISTRY_URL/perception/lidar_object_detection"} PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE=${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:-"$REGISTRY_URL/perception/camera_object_detection"} -PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE=${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE:-"$REGISTRY_URL/perception/traffic_light_detection"} PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE=${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE:-"$REGISTRY_URL/perception/traffic_sign_detection"} PERCEPTION_LANE_DETECTION_IMAGE=${PERCEPTION_LANE_DETECTION_IMAGE:-"$REGISTRY_URL/perception/lane_detection"} PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE=${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE:-"$REGISTRY_URL/perception/semantic_segmentation"} @@ -108,6 +89,10 @@ ACTION_MPC_IMAGE=${ACTION_MPC_IMAGE:-"$REGISTRY_URL/action/model_predictive_cont # Simulation SIMULATION_CARLA_IMAGE=${SIMULATION_CARLA_IMAGE:-"$REGISTRY_URL/simulation/carla_sim"} +SIMULATION_CARLA_ROS_BRIDGE_IMAGE=${SIMULATION_CARLA_ROS_BRIDGE_IMAGE:-"$REGISTRY_URL/simulation/carla_ros_bridge"} +SIMULATION_CARLAVIZ_IMAGE=${SIMULATION_CARLAVIZ_IMAGE:-"$REGISTRY_URL/simulation/carla_viz"} +SIMULATION_CARLA_NOTEBOOKS_IMAGE=${SIMULATION_CARLA_NOTEBOOKS_IMAGE:-"$REGISTRY_URL/simulation/carla_notebooks"} 
+SIMULATION_CARLA_SAMPLE_NODE_IMAGE=${SIMULATION_CARLA_SAMPLE_NODE_IMAGE:-"$REGISTRY_URL/simulation/carla_sample_node"} # Interfacing INTERFACING_CAN_IMAGE=${INTERFACING_CAN_IMAGE:-"$REGISTRY_URL/interfacing/can_interfacing"} @@ -123,6 +108,9 @@ SETGID=$(id -g) BASE_PORT=${BASE_PORT:-$(($(id -u)*20))} GUI_TOOLS_VNC_PORT=${GUI_TOOLS_VNC_PORT:-$((BASE_PORT++))} FOXGLOVE_BRIDGE_PORT=${FOXGLOVE_BRIDGE_PORT:-$((GUI_TOOLS_VNC_PORT++))} +CARLAVIZ_PORT=${CARLAVIZ_PORT:-$((GUI_TOOLS_VNC_PORT++))} +CARLAVIZ_PORT_2=${CARLAVIZ_PORT_2:-$((GUI_TOOLS_VNC_PORT++))} +CARLA_NOTEBOOKS_PORT=${CARLA_NOTEBOOKS_PORT:-$((GUI_TOOLS_VNC_PORT++))} ## -------------------- Export Environment Variables ------------------------- @@ -150,6 +138,9 @@ echo "SETGID=$SETGID" >> "$MODULES_DIR/.env" echo "BASE_PORT=$BASE_PORT" >> "$MODULES_DIR/.env" echo "GUI_TOOLS_VNC_PORT=$GUI_TOOLS_VNC_PORT" >> "$MODULES_DIR/.env" echo "FOXGLOVE_BRIDGE_PORT=$FOXGLOVE_BRIDGE_PORT" >> "$MODULES_DIR/.env" +echo "CARLAVIZ_PORT=$CARLAVIZ_PORT" >> "$MODULES_DIR/.env" +echo "CARLAVIZ_PORT_2=$CARLAVIZ_PORT_2" >> "$MODULES_DIR/.env" +echo "CARLA_NOTEBOOKS_PORT=$CARLA_NOTEBOOKS_PORT" >> "$MODULES_DIR/.env" echo "REGISTRY=$REGISTRY" >> "$MODULES_DIR/.env" echo "REGISTRY=$REPOSITORY" >> "$MODULES_DIR/.env" @@ -173,7 +164,6 @@ echo "INFRASTRUCTURE_FOXGLOVE_IMAGE=$INFRASTRUCTURE_FOXGLOVE_IMAGE" >> "$MODULES echo "PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE=$PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE" >> "$MODULES_DIR/.env" echo "PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE=$PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE" >> "$MODULES_DIR/.env" echo "PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE=$PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE" >> "$MODULES_DIR/.env" -echo "PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE=$PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE" >> "$MODULES_DIR/.env" echo "PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE=$PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE" >> "$MODULES_DIR/.env" echo "PERCEPTION_LANE_DETECTION_IMAGE=$PERCEPTION_LANE_DETECTION_IMAGE" >> 
"$MODULES_DIR/.env" echo "PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE=$PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE" >> "$MODULES_DIR/.env" @@ -194,6 +184,10 @@ echo "ACTION_MPC_IMAGE=$ACTION_MPC_IMAGE" >> "$MODULES_DIR/.env" # Simulation echo "SIMULATION_CARLA_IMAGE=$SIMULATION_CARLA_IMAGE" >> "$MODULES_DIR/.env" +echo "SIMULATION_CARLA_ROS_BRIDGE_IMAGE=$SIMULATION_CARLA_ROS_BRIDGE_IMAGE" >> "$MODULES_DIR/.env" +echo "SIMULATION_CARLAVIZ_IMAGE=$SIMULATION_CARLAVIZ_IMAGE" >> "$MODULES_DIR/.env" +echo "SIMULATION_CARLA_NOTEBOOKS_IMAGE=$SIMULATION_CARLA_NOTEBOOKS_IMAGE" >> "$MODULES_DIR/.env" +echo "SIMULATION_CARLA_SAMPLE_NODE_IMAGE=$SIMULATION_CARLA_SAMPLE_NODE_IMAGE" >> "$MODULES_DIR/.env" # Interfacing echo "INTERFACING_CAN_IMAGE=$INTERFACING_CAN_IMAGE" >> "$MODULES_DIR/.env"