diff --git a/depthai_sdk/docs/requirements.txt b/depthai_sdk/docs/requirements.txt index b0abb2e0d..d75a044fe 100644 --- a/depthai_sdk/docs/requirements.txt +++ b/depthai_sdk/docs/requirements.txt @@ -1,3 +1,4 @@ -Sphinx==4.1.2 +Sphinx==4.2.0 sphinx-rtd-theme==0.5.0 autodocsumm==0.2.10 +sphinx-tabs==3.4.0 \ No newline at end of file diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_age_gender.png b/depthai_sdk/docs/source/_static/images/demos/sdk_age_gender.png new file mode 100644 index 000000000..247f206da Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_age_gender.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_api_interop.png b/depthai_sdk/docs/source/_static/images/demos/sdk_api_interop.png new file mode 100644 index 000000000..403bfad56 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_api_interop.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_camera_control.gif b/depthai_sdk/docs/source/_static/images/demos/sdk_camera_control.gif new file mode 100644 index 000000000..18b776ce7 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_camera_control.gif differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_camera_control_with_NN.png b/depthai_sdk/docs/source/_static/images/demos/sdk_camera_control_with_NN.png new file mode 100644 index 000000000..501ade119 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_camera_control_with_NN.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_camera_preview.png b/depthai_sdk/docs/source/_static/images/demos/sdk_camera_preview.png new file mode 100644 index 000000000..53b70263c Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_camera_preview.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_car_tracking.gif b/depthai_sdk/docs/source/_static/images/demos/sdk_car_tracking.gif 
new file mode 100644 index 000000000..d8235b131 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_car_tracking.gif differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_collision_avoidance.gif b/depthai_sdk/docs/source/_static/images/demos/sdk_collision_avoidance.gif new file mode 100644 index 000000000..18743895c Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_collision_avoidance.gif differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_counter.gif b/depthai_sdk/docs/source/_static/images/demos/sdk_counter.gif new file mode 100644 index 000000000..6ee0f3bdb Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_counter.gif differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_emotion_recognition.gif b/depthai_sdk/docs/source/_static/images/demos/sdk_emotion_recognition.gif new file mode 100644 index 000000000..3364c271a Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_emotion_recognition.gif differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_face_detection_color.png b/depthai_sdk/docs/source/_static/images/demos/sdk_face_detection_color.png new file mode 100644 index 000000000..416893db7 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_face_detection_color.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_face_detection_left.png b/depthai_sdk/docs/source/_static/images/demos/sdk_face_detection_left.png new file mode 100644 index 000000000..f7a7af173 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_face_detection_left.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_human_pose.gif b/depthai_sdk/docs/source/_static/images/demos/sdk_human_pose.gif new file mode 100644 index 000000000..9347fc5fd Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_human_pose.gif differ 
diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_imu_demo.png b/depthai_sdk/docs/source/_static/images/demos/sdk_imu_demo.png new file mode 100644 index 000000000..f73114c3b Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_imu_demo.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_imu_rerun.png b/depthai_sdk/docs/source/_static/images/demos/sdk_imu_rerun.png new file mode 100644 index 000000000..aa8a5d823 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_imu_rerun.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_mono_400p.png b/depthai_sdk/docs/source/_static/images/demos/sdk_mono_400p.png new file mode 100644 index 000000000..d2fe33144 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_mono_400p.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_object_tracking.png b/depthai_sdk/docs/source/_static/images/demos/sdk_object_tracking.png new file mode 100644 index 000000000..15d530115 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_object_tracking.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_photo_download.png b/depthai_sdk/docs/source/_static/images/demos/sdk_photo_download.png new file mode 100644 index 000000000..9747ad026 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_photo_download.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_pointcloud.gif b/depthai_sdk/docs/source/_static/images/demos/sdk_pointcloud.gif new file mode 100644 index 000000000..fcdada9bb Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_pointcloud.gif differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_preview_all_cameras.png b/depthai_sdk/docs/source/_static/images/demos/sdk_preview_all_cameras.png new file mode 100644 index 000000000..f79016349 Binary files /dev/null and 
b/depthai_sdk/docs/source/_static/images/demos/sdk_preview_all_cameras.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_rotated.png b/depthai_sdk/docs/source/_static/images/demos/sdk_rotated.png new file mode 100644 index 000000000..62431fb08 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_rotated.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_spatial_detection.png b/depthai_sdk/docs/source/_static/images/demos/sdk_spatial_detection.png new file mode 100644 index 000000000..0d21e93c9 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_spatial_detection.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_speed_calculation.gif b/depthai_sdk/docs/source/_static/images/demos/sdk_speed_calculation.gif new file mode 100644 index 000000000..a85568a66 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_speed_calculation.gif differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_stereo_auto_ir.png b/depthai_sdk/docs/source/_static/images/demos/sdk_stereo_auto_ir.png new file mode 100644 index 000000000..07c587980 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_stereo_auto_ir.png differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_stereo_control.gif b/depthai_sdk/docs/source/_static/images/demos/sdk_stereo_control.gif new file mode 100644 index 000000000..be73dc6e4 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_stereo_control.gif differ diff --git a/depthai_sdk/docs/source/_static/images/demos/sdk_sync_multiple_outputs.png b/depthai_sdk/docs/source/_static/images/demos/sdk_sync_multiple_outputs.png new file mode 100644 index 000000000..cac145d1f Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_sync_multiple_outputs.png differ diff --git 
a/depthai_sdk/docs/source/_static/images/demos/sdk_visualizer_callback.png b/depthai_sdk/docs/source/_static/images/demos/sdk_visualizer_callback.png new file mode 100644 index 000000000..101945ee3 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/demos/sdk_visualizer_callback.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/age-gender.png b/depthai_sdk/docs/source/_static/images/pipelines/age-gender.png new file mode 100644 index 000000000..54ff08a9e Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/age-gender.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/api_interop.png b/depthai_sdk/docs/source/_static/images/pipelines/api_interop.png new file mode 100644 index 000000000..1bb5e184c Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/api_interop.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/cam_ffc.png b/depthai_sdk/docs/source/_static/images/pipelines/cam_ffc.png new file mode 100644 index 000000000..662bb58a3 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/cam_ffc.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/camera_control.png b/depthai_sdk/docs/source/_static/images/pipelines/camera_control.png new file mode 100644 index 000000000..9363b4cea Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/camera_control.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/camera_control_with_NN.png b/depthai_sdk/docs/source/_static/images/pipelines/camera_control_with_NN.png new file mode 100644 index 000000000..7e628fddb Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/camera_control_with_NN.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/camera_preview.png b/depthai_sdk/docs/source/_static/images/pipelines/camera_preview.png new file mode 100644 index 
000000000..2e6d8239e Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/camera_preview.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/car_tracking.png b/depthai_sdk/docs/source/_static/images/pipelines/car_tracking.png new file mode 100644 index 000000000..2c5f7559f Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/car_tracking.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/collision_avoidance.png b/depthai_sdk/docs/source/_static/images/pipelines/collision_avoidance.png new file mode 100644 index 000000000..e4a24af00 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/collision_avoidance.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/counter.png b/depthai_sdk/docs/source/_static/images/pipelines/counter.png new file mode 100644 index 000000000..e281bc888 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/counter.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/custom_action.png b/depthai_sdk/docs/source/_static/images/pipelines/custom_action.png new file mode 100644 index 000000000..9ee1c0309 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/custom_action.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/custom_decode.png b/depthai_sdk/docs/source/_static/images/pipelines/custom_decode.png new file mode 100644 index 000000000..be7d5a96a Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/custom_decode.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/custom_trigger.png b/depthai_sdk/docs/source/_static/images/pipelines/custom_trigger.png new file mode 100644 index 000000000..d469e843f Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/custom_trigger.png differ diff --git 
a/depthai_sdk/docs/source/_static/images/pipelines/deeplabv3_person.png b/depthai_sdk/docs/source/_static/images/pipelines/deeplabv3_person.png new file mode 100644 index 000000000..ece1e27a8 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/deeplabv3_person.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/emotion_recognition.png b/depthai_sdk/docs/source/_static/images/pipelines/emotion_recognition.png new file mode 100644 index 000000000..bbd292ae2 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/emotion_recognition.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/encode.png b/depthai_sdk/docs/source/_static/images/pipelines/encode.png new file mode 100644 index 000000000..2a4718d3e Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/encode.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/encoder_preview.png b/depthai_sdk/docs/source/_static/images/pipelines/encoder_preview.png new file mode 100644 index 000000000..97471e61a Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/encoder_preview.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/face_detection_color.png b/depthai_sdk/docs/source/_static/images/pipelines/face_detection_color.png new file mode 100644 index 000000000..efe7cce5c Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/face_detection_color.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/face_detection_left.png b/depthai_sdk/docs/source/_static/images/pipelines/face_detection_left.png new file mode 100644 index 000000000..0b3481174 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/face_detection_left.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/human_pose.png b/depthai_sdk/docs/source/_static/images/pipelines/human_pose.png new file mode 
100644 index 000000000..6db9e5cdb Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/human_pose.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/imu.png b/depthai_sdk/docs/source/_static/images/pipelines/imu.png new file mode 100644 index 000000000..9101c44ec Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/imu.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/looped_replay.png b/depthai_sdk/docs/source/_static/images/pipelines/looped_replay.png new file mode 100644 index 000000000..635785e83 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/looped_replay.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/mobilenet_encoded.png b/depthai_sdk/docs/source/_static/images/pipelines/mobilenet_encoded.png new file mode 100644 index 000000000..aa80d70f4 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/mobilenet_encoded.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/mono_400p.png b/depthai_sdk/docs/source/_static/images/pipelines/mono_400p.png new file mode 100644 index 000000000..2528e6ddf Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/mono_400p.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/nn_component.png b/depthai_sdk/docs/source/_static/images/pipelines/nn_component.png new file mode 100644 index 000000000..1fdf9538a Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/nn_component.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/object_tracking.png b/depthai_sdk/docs/source/_static/images/pipelines/object_tracking.png new file mode 100644 index 000000000..084af5d73 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/object_tracking.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/people_tracker.png 
b/depthai_sdk/docs/source/_static/images/pipelines/people_tracker.png new file mode 100644 index 000000000..9d2a15e82 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/people_tracker.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/person_record.png b/depthai_sdk/docs/source/_static/images/pipelines/person_record.png new file mode 100644 index 000000000..7752b0a69 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/person_record.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/photo_download.png b/depthai_sdk/docs/source/_static/images/pipelines/photo_download.png new file mode 100644 index 000000000..a86f4d0ee Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/photo_download.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/pointcloud.png b/depthai_sdk/docs/source/_static/images/pipelines/pointcloud.png new file mode 100644 index 000000000..c95c669f0 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/pointcloud.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/preview_all_cameras.png b/depthai_sdk/docs/source/_static/images/pipelines/preview_all_cameras.png new file mode 100644 index 000000000..0937354e8 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/preview_all_cameras.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/recording_duration.png b/depthai_sdk/docs/source/_static/images/pipelines/recording_duration.png new file mode 100644 index 000000000..8ef3d998f Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/recording_duration.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/rgb_mono_preview.png b/depthai_sdk/docs/source/_static/images/pipelines/rgb_mono_preview.png new file mode 100644 index 000000000..0fb45236a Binary files /dev/null and 
b/depthai_sdk/docs/source/_static/images/pipelines/rgb_mono_preview.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/roboflow_integration.png b/depthai_sdk/docs/source/_static/images/pipelines/roboflow_integration.png new file mode 100644 index 000000000..e161f2114 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/roboflow_integration.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/rosbag_record.png b/depthai_sdk/docs/source/_static/images/pipelines/rosbag_record.png new file mode 100644 index 000000000..ff8efe177 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/rosbag_record.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/rotated.png b/depthai_sdk/docs/source/_static/images/pipelines/rotated.png new file mode 100644 index 000000000..175e46656 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/rotated.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/spatial_detection.png b/depthai_sdk/docs/source/_static/images/pipelines/spatial_detection.png new file mode 100644 index 000000000..d1fd98e2c Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/spatial_detection.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/stereo.png b/depthai_sdk/docs/source/_static/images/pipelines/stereo.png new file mode 100644 index 000000000..bf3127d6d Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/stereo.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/stereo_auto_ir.png b/depthai_sdk/docs/source/_static/images/pipelines/stereo_auto_ir.png new file mode 100644 index 000000000..cc89c215b Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/stereo_auto_ir.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/stereo_control.png 
b/depthai_sdk/docs/source/_static/images/pipelines/stereo_control.png new file mode 100644 index 000000000..1e7abaea2 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/stereo_control.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/stereo_encoded.png b/depthai_sdk/docs/source/_static/images/pipelines/stereo_encoded.png new file mode 100644 index 000000000..2123adefd Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/stereo_encoded.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/stereo_record.png b/depthai_sdk/docs/source/_static/images/pipelines/stereo_record.png new file mode 100644 index 000000000..85c1d6df5 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/stereo_record.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/switch_between_models.png b/depthai_sdk/docs/source/_static/images/pipelines/switch_between_models.png new file mode 100644 index 000000000..48a27876c Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/switch_between_models.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/sync_multiple_outputs.png b/depthai_sdk/docs/source/_static/images/pipelines/sync_multiple_outputs.png new file mode 100644 index 000000000..5e25164b5 Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/sync_multiple_outputs.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/visualizer.png b/depthai_sdk/docs/source/_static/images/pipelines/visualizer.png new file mode 100644 index 000000000..319143e3f Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/visualizer.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/visualizer_callback.png b/depthai_sdk/docs/source/_static/images/pipelines/visualizer_callback.png new file mode 100644 index 000000000..319143e3f Binary files /dev/null and 
b/depthai_sdk/docs/source/_static/images/pipelines/visualizer_callback.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/yolo.png b/depthai_sdk/docs/source/_static/images/pipelines/yolo.png new file mode 100644 index 000000000..c6b03850a Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/yolo.png differ diff --git a/depthai_sdk/docs/source/_static/images/pipelines/youtube_download.png b/depthai_sdk/docs/source/_static/images/pipelines/youtube_download.png new file mode 100644 index 000000000..a3c4849bc Binary files /dev/null and b/depthai_sdk/docs/source/_static/images/pipelines/youtube_download.png differ diff --git a/depthai_sdk/docs/source/components/camera_component.rst b/depthai_sdk/docs/source/components/camera_component.rst index b52a01b5c..0de405eed 100644 --- a/depthai_sdk/docs/source/components/camera_component.rst +++ b/depthai_sdk/docs/source/components/camera_component.rst @@ -5,6 +5,7 @@ CameraComponent nodes and supports mocking the camera when recording is passed during OakCamera initialization. When using :ref:`Replaying` feature, this component will mock the camera by sending frames from the host to the OAK device (via `XLinkIn `__ node). + Usage ##### diff --git a/depthai_sdk/docs/source/conf.py b/depthai_sdk/docs/source/conf.py index 387f97937..884eca56a 100644 --- a/depthai_sdk/docs/source/conf.py +++ b/depthai_sdk/docs/source/conf.py @@ -36,7 +36,8 @@ "sphinx_rtd_theme", 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', - 'autodocsumm' + 'autodocsumm', + 'sphinx_tabs.tabs' ] # Add any paths that contain templates here, relative to this directory. diff --git a/depthai_sdk/docs/source/includes/blocking_behavior.rst b/depthai_sdk/docs/source/includes/blocking_behavior.rst new file mode 100644 index 000000000..5443cff6d --- /dev/null +++ b/depthai_sdk/docs/source/includes/blocking_behavior.rst @@ -0,0 +1,3 @@ +.. note:: + Visualization in current example is done with blocking behavor. 
This means that the program will halt at ``oak.start()`` until the window is closed. + This is done to keep the example simple. For more advanced usage, see :ref:`Blocking behavior` section. \ No newline at end of file diff --git a/depthai_sdk/docs/source/includes/install_from_pypi.rst b/depthai_sdk/docs/source/includes/install_from_pypi.rst new file mode 100644 index 000000000..cbc0f91f5 --- /dev/null +++ b/depthai_sdk/docs/source/includes/install_from_pypi.rst @@ -0,0 +1,12 @@ +Please run the `install script `__ +to download all required dependencies. Please note that this script must be ran from git context, so you have to download the `depthai `__ repository first and then run the script + + +.. code-block:: python + + git clone https://github.com/luxonis/depthai.git + cd depthai/ + python3 install_requirements.py + + +For additional information, please follow our :ref:`installation guide `. diff --git a/depthai_sdk/docs/source/index.rst b/depthai_sdk/docs/source/index.rst index e25244311..a8dbe270e 100644 --- a/depthai_sdk/docs/source/index.rst +++ b/depthai_sdk/docs/source/index.rst @@ -39,6 +39,15 @@ ease of use when developing apps for OAK devices**. features/* + +.. toctree:: + :maxdepth: 1 + :hidden: + :caption: Examples + + tutorials/code_samples.rst + + .. toctree:: :maxdepth: 2 :hidden: @@ -46,3 +55,4 @@ ease of use when developing apps for OAK devices**. :caption: References api_reference.rst + diff --git a/depthai_sdk/docs/source/quickstart.rst b/depthai_sdk/docs/source/quickstart.rst index 5743184a3..4edb203db 100644 --- a/depthai_sdk/docs/source/quickstart.rst +++ b/depthai_sdk/docs/source/quickstart.rst @@ -18,6 +18,38 @@ This class simplifies the creation of pipelines that capture video from the OAK With :class:`OakCamera `, you can easily create color and depth streams using the :meth:`create_camera() ` and :meth:`create_stereo() ` methods respectively, and add pre-trained neural networks using the :meth:`create_nn() ` method. 
Additionally, you can add custom callbacks to the pipeline using the :meth:`callback() ` method and record the outputs using the :meth:`record() ` method. +Blocking behavior +^^^^^^^^^^^^^^^^^ + +When starting the :class:`OakCamera ` object, you can specify whether the :meth:`start() ` method should block the main thread or not. By default, the :meth:`start() ` method does not block the main thread, which means you will need to manually poll the camera using the :meth:`oak.poll() ` method. + +.. code-block:: python + + from depthai_sdk import OakCamera + + with OakCamera() as oak: + color = oak.create_camera('color', resolution='1080p') + oak.visualize([color]) + oak.start(blocking=False) + + while oak.running(): + oak.poll() + # this code is executed while the pipeline is running + + +Alternatively, setting the ``blocking`` argument to ``True`` will loop and continuously poll the camera, blocking the rest of the code. + + +.. code-block:: python + + from depthai_sdk import OakCamera + + with OakCamera() as oak: + color = oak.create_camera('color', resolution='1080p') + oak.visualize([color]) + oak.start(blocking=True) + # this code doesn't execute until the pipeline is stopped + Creating color and depth streams --------------------- diff --git a/depthai_sdk/docs/source/samples/CameraComponent/sdk_cam_ffc.rst b/depthai_sdk/docs/source/samples/CameraComponent/sdk_cam_ffc.rst new file mode 100644 index 000000000..bfcbca398 --- /dev/null +++ b/depthai_sdk/docs/source/samples/CameraComponent/sdk_cam_ffc.rst @@ -0,0 +1,37 @@ +FFC Camera Visualization +======================== + +This example shows how to use the `Camera` component to display the camera feed from the FFC camera. + +For FFC, the camera board socket must be specified. In our case the cameras are connected to socket A, B and C. After setting the resolution to 1200p +and downscaling using ISP to 800p, the camera feed is displayed in a window. + +.. 
include:: /includes/blocking_behavior.rst + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/cam_ffc.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/CameraComponent/cam_ffc.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/CameraComponent/sdk_camera_control.rst b/depthai_sdk/docs/source/samples/CameraComponent/sdk_camera_control.rst new file mode 100644 index 000000000..d5d6a275f --- /dev/null +++ b/depthai_sdk/docs/source/samples/CameraComponent/sdk_camera_control.rst @@ -0,0 +1,49 @@ +Camera Control +============== + +This example shows how to use DepthAI SDK to control the color camera parameters. + +.. code-block:: + + Control: key[dec/inc] min..max + exposure time: I O 1..33000 [us] + sensitivity iso: K L 100..1600 + + To go back to auto controls: + 'E' - autoexposure + + +Demo +#### + +.. image:: /_static/images/demos/sdk_camera_control.gif + :alt: Camera Control Demo + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/camera_control.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/CameraComponent/camera_control.py + :language: python + :linenos: + +.. 
include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/CameraComponent/sdk_camera_control_with_nn.rst b/depthai_sdk/docs/source/samples/CameraComponent/sdk_camera_control_with_nn.rst new file mode 100644 index 000000000..23db6a3c5 --- /dev/null +++ b/depthai_sdk/docs/source/samples/CameraComponent/sdk_camera_control_with_nn.rst @@ -0,0 +1,41 @@ +Camera Control with NN +===================== + +This example shows how to set up control of color camera (focus and exposure) to be controlled by NN. The NN is a face detection model which passes detected face +bounding box to camera component run auto focus and auto exposure algorithms on. + +.. include:: /includes/blocking_behavior.rst + +Demo +#### + +.. image:: /_static/images/demos/sdk_camera_control_with_NN.png + :alt: Control with NN Demo + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/camera_control_with_NN.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/CameraComponent/camera_control_with_NN.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/CameraComponent/sdk_camera_preview.rst b/depthai_sdk/docs/source/samples/CameraComponent/sdk_camera_preview.rst new file mode 100644 index 000000000..7b6e02a50 --- /dev/null +++ b/depthai_sdk/docs/source/samples/CameraComponent/sdk_camera_preview.rst @@ -0,0 +1,43 @@ +Camera Preview +============== + +This example shows how to set up a pipeline that outputs a a preview for color camera, both mono cameras and their stereo depth. Each frame is displayed using OpenCV in blocking behavour. + +.. include:: /includes/blocking_behavior.rst + + + +Demo +#### + +.. 
image:: /_static/images/demos/sdk_camera_preview.png + :alt: Camera Preview Demo + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/camera_preview.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/CameraComponent/camera_preview.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/CameraComponent/sdk_mono_400p.rst b/depthai_sdk/docs/source/samples/CameraComponent/sdk_mono_400p.rst new file mode 100644 index 000000000..2902419e6 --- /dev/null +++ b/depthai_sdk/docs/source/samples/CameraComponent/sdk_mono_400p.rst @@ -0,0 +1,40 @@ +Mono Camera Preview +=================== + +This example shows how to set up a pipeline that outputs a video feed for both mono cameras and sets the resolution to 400p (640x400) and the frame rate to 60 fps. + +.. include:: /includes/blocking_behavior.rst + +Demo +#### + +.. image:: /_static/images/demos/sdk_mono_400p.png + :alt: Mono Demo + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/mono_400p.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/CameraComponent/mono_400p.py + :language: python + :linenos: + +.. 
include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/CameraComponent/sdk_preview_all_cameras.rst b/depthai_sdk/docs/source/samples/CameraComponent/sdk_preview_all_cameras.rst new file mode 100644 index 000000000..a765b98c1 --- /dev/null +++ b/depthai_sdk/docs/source/samples/CameraComponent/sdk_preview_all_cameras.rst @@ -0,0 +1,41 @@ +Preview All Cameras +=================== + +This example shows how to set up a pipeline that outputs a preview for each camera currently connected (and available) to the device. The preview is displayed in a window on the host machine. +If run on OAK-D devices, this example does the same thing as the ``sdk_camera_preview`` example. + +.. include:: /includes/blocking_behavior.rst + +Demo +#### + +.. image:: /_static/images/demos/sdk_preview_all_cameras.png + :alt: Camera Preview Demo + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/preview_all_cameras.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/CameraComponent/preview_all_cameras.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/CameraComponent/sdk_rgb_mono_preview.rst b/depthai_sdk/docs/source/samples/CameraComponent/sdk_rgb_mono_preview.rst new file mode 100644 index 000000000..fa70f29ce --- /dev/null +++ b/depthai_sdk/docs/source/samples/CameraComponent/sdk_rgb_mono_preview.rst @@ -0,0 +1,38 @@ +RGB and Mono Preview +==================== + +This example shows how to use the `Camera` component to get RGB and Mono previews. It is similar to the :ref:`sdk_camera_preview` example, but lacks the stereo depth visualization. + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. 
image:: /_static/images/demos/sdk_preview_all_cameras.png + :alt: RGB and Mono Preview + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/rgb_mono_preview.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/CameraComponent/rgb_mono_preview.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/CameraComponent/sdk_rotated.rst b/depthai_sdk/docs/source/samples/CameraComponent/sdk_rotated.rst new file mode 100644 index 000000000..81ce2c1ec --- /dev/null +++ b/depthai_sdk/docs/source/samples/CameraComponent/sdk_rotated.rst @@ -0,0 +1,39 @@ +Camera Rotated Preview +====================== + +This example showcases how to rotate the preview frames by a desired angle (currently only 90, 180 and 270 degrees are supported). + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_rotated.png + :alt: Rotated preview + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/rotated.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/CameraComponent/rotated.py + :language: python + :linenos: + +.. 
include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/IMUComponent/sdk_imu.rst b/depthai_sdk/docs/source/samples/IMUComponent/sdk_imu.rst new file mode 100644 index 000000000..21a46f70e --- /dev/null +++ b/depthai_sdk/docs/source/samples/IMUComponent/sdk_imu.rst @@ -0,0 +1,42 @@ +IMU Demonstration +================= + +This example showcases how to use the integrated `IMU sensor `__ on the OAK-D board with the Depthai sdk. In our example +we set the IMU to output data at 400Hz, and batch size to 5. This means we get 5 IMU readings every 12.5ms (2.5ms per reading * 5). We then print out the IMU data to the console. + +.. include:: /includes/blocking_behavior.rst + +Demo +#### + +.. image:: /_static/images/demos/sdk_imu_demo.png + :alt: IMU Demo + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/imu.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__. + + .. literalinclude:: ../../../../examples/IMUComponent/imu.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/IMUComponent/sdk_imu_rerun.rst b/depthai_sdk/docs/source/samples/IMUComponent/sdk_imu_rerun.rst new file mode 100644 index 000000000..50d58312e --- /dev/null +++ b/depthai_sdk/docs/source/samples/IMUComponent/sdk_imu_rerun.rst @@ -0,0 +1,42 @@ +IMU Rerun Demonstration +======================= + +This example showcases how to use the integrated `IMU sensor `__ on the OAK-D board. In this example, the displaying is done with `Rerun `__ (the same core as our `DepthAI Viewer `__). + + +.. include:: /includes/blocking_behavior.rst + +Demo +#### + +.. image:: /_static/images/demos/sdk_imu_rerun.png + :alt: IMU Demo + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. 
image:: /_static/images/pipelines/imu.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__. + + .. literalinclude:: ../../../../examples/IMUComponent/imu_rerun.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/NNComponent/sdk_age_gender.rst b/depthai_sdk/docs/source/samples/NNComponent/sdk_age_gender.rst new file mode 100644 index 000000000..9a93a09e5 --- /dev/null +++ b/depthai_sdk/docs/source/samples/NNComponent/sdk_age_gender.rst @@ -0,0 +1,38 @@ +Age-Gender Inference +==================== + +This example showcases the usage of multi-stage neural network pipeline to make age and gender inference on a video frame. + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_age_gender.png + :alt: Age/gender demo + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/age-gender.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/NNComponent/age-gender.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/NNComponent/sdk_custom_decode.rst b/depthai_sdk/docs/source/samples/NNComponent/sdk_custom_decode.rst new file mode 100644 index 000000000..6a37b7efd --- /dev/null +++ b/depthai_sdk/docs/source/samples/NNComponent/sdk_custom_decode.rst @@ -0,0 +1,36 @@ +Custom Decode Function +====================== + +This example showcases the usage of custom decoding functions for the neural network component. More info is available inside the function itself. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. 
include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/custom_decode.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/NNComponent/custom_decode.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/NNComponent/sdk_deeplabv3_person.rst b/depthai_sdk/docs/source/samples/NNComponent/sdk_deeplabv3_person.rst new file mode 100644 index 000000000..160d974b9 --- /dev/null +++ b/depthai_sdk/docs/source/samples/NNComponent/sdk_deeplabv3_person.rst @@ -0,0 +1,46 @@ +Deeplabv3 Person Segmentation +============================= + +This example showcases the implementation of deepLabv3 person segmentation model with DepthAI SDK. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/deeplabv3_person.png + :alt: Pipeline graph + + + +Source Code +########### + +One thing worth noting is the resize mode option. Because inference is done on a color camera which has a 16:9 aspect ratio, and the model expects a 1:1 aspect ratio, we need +to resize the input frame to fit the model. This is done in three ways: + - letterbox - resize the frame to fit the model, and pad the rest with black pixels + - crop - crop the frame to fit the model + - stretch - stretch the frame to fit the model + +More information at `Maximizing FOV `__. + + +.. tabs:: + + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/NNComponent/deeplabv3_person.py + :language: python + :linenos: + +.. 
include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/NNComponent/sdk_emotion_recognition.rst b/depthai_sdk/docs/source/samples/NNComponent/sdk_emotion_recognition.rst new file mode 100644 index 000000000..bd6d02d41 --- /dev/null +++ b/depthai_sdk/docs/source/samples/NNComponent/sdk_emotion_recognition.rst @@ -0,0 +1,39 @@ +Emotion Recognition +=================== + +This example showcases the implementation of two stage neural network pipeline, where the first stage is a face detection network, and the second stage is an emotion recognition model. + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_emotion_recognition.gif + :alt: Emotion Recognition Demo + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/emotion_recognition.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/NNComponent/emotion-recognition.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/NNComponent/sdk_face_detection_color.rst b/depthai_sdk/docs/source/samples/NNComponent/sdk_face_detection_color.rst new file mode 100644 index 000000000..8cde34eaa --- /dev/null +++ b/depthai_sdk/docs/source/samples/NNComponent/sdk_face_detection_color.rst @@ -0,0 +1,42 @@ +Face Detection RGB +================== + +This example shows how to run face detection on RGB camera input using SDK. + +For running the same face detection on mono camera, see :ref:`Face Detection Mono`. + + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_face_detection_color.png + :alt: RGB face detection demo + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. 
image:: /_static/images/pipelines/face_detection_color.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/NNComponent/face_detection_color.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/NNComponent/sdk_face_detection_left.rst b/depthai_sdk/docs/source/samples/NNComponent/sdk_face_detection_left.rst new file mode 100644 index 000000000..158814bd7 --- /dev/null +++ b/depthai_sdk/docs/source/samples/NNComponent/sdk_face_detection_left.rst @@ -0,0 +1,40 @@ +Face Detection Mono +=================== + +This example shows how to run face detection on Mono camera input using SDK. + +For running the same face detection on RGB camera, see :ref:`Face Detection RGB`. + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_face_detection_left.png + :alt: Mono camera face detection demo + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/face_detection_left.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/NNComponent/face_detection_left.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/NNComponent/sdk_human_pose.rst b/depthai_sdk/docs/source/samples/NNComponent/sdk_human_pose.rst new file mode 100644 index 000000000..64fd2a855 --- /dev/null +++ b/depthai_sdk/docs/source/samples/NNComponent/sdk_human_pose.rst @@ -0,0 +1,38 @@ +Human Pose Estimation +===================== + +This example showcases the implementation of a human pose estimation network using the DepthAI SDK. + +.. 
include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_human_pose.gif + :alt: Human Pose Estimation Demo + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/human_pose.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/NNComponent/human_pose.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/NNComponent/sdk_mobilenet_encoded.rst b/depthai_sdk/docs/source/samples/NNComponent/sdk_mobilenet_encoded.rst new file mode 100644 index 000000000..6d8087c81 --- /dev/null +++ b/depthai_sdk/docs/source/samples/NNComponent/sdk_mobilenet_encoded.rst @@ -0,0 +1,35 @@ +MobileNet Encoded +================= + +This example shows how to run an encoded RGB stream through a neural network and display the encoded results. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/mobilenet_encoded.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/NNComponent/mobilenet_encoded.py + :language: python + :linenos: + +.. 
include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/NNComponent/sdk_nn_component.rst b/depthai_sdk/docs/source/samples/NNComponent/sdk_nn_component.rst new file mode 100644 index 000000000..2778b059f --- /dev/null +++ b/depthai_sdk/docs/source/samples/NNComponent/sdk_nn_component.rst @@ -0,0 +1,38 @@ +Neural Network Component +======================== + +This example shows how to run a color camera stream through a YoloV7 model and display the results on the host. + +For additional models, check: `models supported by SDK `__ + + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/nn_component.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__. + + .. literalinclude:: ../../../../examples/NNComponent/nn_component.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/NNComponent/sdk_object_tracking.rst b/depthai_sdk/docs/source/samples/NNComponent/sdk_object_tracking.rst new file mode 100644 index 000000000..8e768f41d --- /dev/null +++ b/depthai_sdk/docs/source/samples/NNComponent/sdk_object_tracking.rst @@ -0,0 +1,43 @@ +Object Tracking +=============== + +This example showcases the usage of object tracking in Depthai SDK. + +For more information about tracker configuration, please refer to `config tracker reference `__. + + + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_object_tracking.png + :alt: Object Tracking Demo + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + + +Pipeline +######## + +.. image:: /_static/images/pipelines/object_tracking.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. 
tab:: Python + + Also `available on GitHub `__. + + .. literalinclude:: ../../../../examples/NNComponent/object_tracking.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/NNComponent/sdk_roboflow_integration.rst b/depthai_sdk/docs/source/samples/NNComponent/sdk_roboflow_integration.rst new file mode 100644 index 000000000..43befa343 --- /dev/null +++ b/depthai_sdk/docs/source/samples/NNComponent/sdk_roboflow_integration.rst @@ -0,0 +1,36 @@ +Roboflow Integration +==================== + +This example showcases the usage of the `ROBOFLOW `__ platform to train a custom object detection model and use it with DepthAI SDK. + + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/roboflow_integration.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__. + + .. literalinclude:: ../../../../examples/NNComponent/roboflow_integration.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/NNComponent/sdk_spatial_detection.rst b/depthai_sdk/docs/source/samples/NNComponent/sdk_spatial_detection.rst new file mode 100644 index 000000000..d6b07d778 --- /dev/null +++ b/depthai_sdk/docs/source/samples/NNComponent/sdk_spatial_detection.rst @@ -0,0 +1,43 @@ +Spatial Detection +================= + +This example showcases the usage of spatial detection using MobileNet-SSD neural network. + +For more information about spatial configuration (thresholds, averaging), please refer to `config spatial reference `__. + + + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_spatial_detection.png + :alt: Spatial Detection Demo + +Setup +##### + +.. 
include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/spatial_detection.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + + .. literalinclude:: ../../../../examples/NNComponent/spatial_detection.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/NNComponent/sdk_yolo.rst b/depthai_sdk/docs/source/samples/NNComponent/sdk_yolo.rst new file mode 100644 index 000000000..65660f7f8 --- /dev/null +++ b/depthai_sdk/docs/source/samples/NNComponent/sdk_yolo.rst @@ -0,0 +1,43 @@ +YOLO SDK +======== + +This example showcases the implementation of Yolov3 object detection network with DepthAI SDK. + +For more information about tracker configuration, please refer to `config tracker reference `__. + + + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_api_interop.png + :alt: YOLO demo + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/yolo.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__. + + + .. literalinclude:: ../../../../examples/NNComponent/yolo.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/PointcloudComponent/SDK_pointcloud.rst b/depthai_sdk/docs/source/samples/PointcloudComponent/SDK_pointcloud.rst new file mode 100644 index 000000000..64e6bec07 --- /dev/null +++ b/depthai_sdk/docs/source/samples/PointcloudComponent/SDK_pointcloud.rst @@ -0,0 +1,44 @@ +Pointcloud Demo +=============== + +This example shows how to create and display pointclouds with DepthAI SDK. + + +.. include:: /includes/blocking_behavior.rst + + +Demo +#### + +.. 
image:: /_static/images/demos/sdk_pointcloud.gif + :alt: Pointcloud Demo + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/pointcloud.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + + + .. literalinclude:: ../../../../examples/PointcloudComonent/pointcloud.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo.rst b/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo.rst new file mode 100644 index 000000000..94f597bdf --- /dev/null +++ b/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo.rst @@ -0,0 +1,37 @@ +Stereo Preview +============== + +This example shows how to display WLS filtered disparity map using OpenCV. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/stereo.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + + .. literalinclude:: ../../../../examples/StereoComponent/stereo.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo_auto_ir.rst b/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo_auto_ir.rst new file mode 100644 index 000000000..70e7e3614 --- /dev/null +++ b/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo_auto_ir.rst @@ -0,0 +1,38 @@ +Auto IR Brightness +================== + +This example shows how to use the automatic IR brightness feature of the DepthAI Stereo Camera. +The function ``set_auto_ir(auto_mode=True)`` enables/disables auto IR dot projector and flood brightness. 
If enabled, it selects the best IR brightness level automatically. + +Can be set to continuous mode, which will continuously adjust the IR brightness. Set to ``False`` by default, which will automatically adjust the IR brightness only at device bootup. + +.. include:: /includes/blocking_behavior.rst + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/stereo_auto_ir.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/StereoComponent/stereo_auto_ir.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo_control.rst b/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo_control.rst new file mode 100644 index 000000000..2ee08c452 --- /dev/null +++ b/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo_control.rst @@ -0,0 +1,54 @@ +Stereo Control +============== + +This example shows how to change stereo parameters such as confidence threshold, median filter and decimating factor on the fly. + +.. code-block:: + + Control: key[dec/inc] min..max + Confidence threshold: I O 1....255 + + Switches: + 'K' - Switch median filter + '1' - Switch to decimation factor 1 + '2' - Switch to decimation factor 2 + '3' - Switch to decimation factor 3 + +.. include:: /includes/blocking_behavior.rst + + +Demo +#### + +.. image:: /_static/images/demos/sdk_camera_control.gif + :alt: Camera Preview Demo + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + + +Pipeline +######## + +.. image:: /_static/images/pipelines/stereo_control.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. 
literalinclude:: ../../../../examples/StereoComponent/stereo_control.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo_encoded.rst b/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo_encoded.rst new file mode 100644 index 000000000..cab07248c --- /dev/null +++ b/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo_encoded.rst @@ -0,0 +1,36 @@ +Stereo Encoding +=============== + +This example shows how to encode disparity map and display it using OpenCV. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/stereo_encoded.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/StereoComponent/stereo_encoded.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/examples/color_example.rst b/depthai_sdk/docs/source/samples/color_example.rst similarity index 100% rename from depthai_sdk/docs/source/examples/color_example.rst rename to depthai_sdk/docs/source/samples/color_example.rst diff --git a/depthai_sdk/docs/source/samples/mixed/sdk_api_interop.rst b/depthai_sdk/docs/source/samples/mixed/sdk_api_interop.rst new file mode 100644 index 000000000..94c96ff44 --- /dev/null +++ b/depthai_sdk/docs/source/samples/mixed/sdk_api_interop.rst @@ -0,0 +1,41 @@ +API Interoperability Example +============================ + +This example shows how to bridge the DepthAI API with the SDK. It first creates the color camera and mobilenet neural network and displays the results. +With `oak.build()` we build the pipeline which is part of the API. We can then manipulate the pipeline just like we would in the API (e.g. 
add Xlink connections, scripts, ...). +In this example we manually add a feature tracker since the SDK currently does not support it. We then start the pipeline and display the results. + +Note that in this case, the visualizer behavior is non-blocking. This means we need to poll the visualizer in order to get the results. + +Demo +#### +.. image:: /_static/images/demos/sdk_api_interop.png + :alt: Api Interop Demo + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/api_interop.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/mixed/api_interop.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/mixed/sdk_car_tracking.rst b/depthai_sdk/docs/source/samples/mixed/sdk_car_tracking.rst new file mode 100644 index 000000000..91e7ca1ff --- /dev/null +++ b/depthai_sdk/docs/source/samples/mixed/sdk_car_tracking.rst @@ -0,0 +1,39 @@ +Car Tracking Example +==================== + +This example shows how to use SDK to run inference on a pre-saved video file and display the results. + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_car_tracking.gif + :alt: Car Tracking Demo + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/car_tracking.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/mixed/car_tracking.py + :language: python + :linenos: + +.. 
include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/mixed/sdk_collision_avoidance.rst b/depthai_sdk/docs/source/samples/mixed/sdk_collision_avoidance.rst new file mode 100644 index 000000000..dbd5f5eb3 --- /dev/null +++ b/depthai_sdk/docs/source/samples/mixed/sdk_collision_avoidance.rst @@ -0,0 +1,41 @@ +Collision Avoidance +=================== + +This example shows how to set up a depth-based collision avoidance system for proximity. This can be used with supervised robotic operation where the goal is to +limit the robot's speed when a person is detected in front of it. + + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_collision_avoidance.gif + :alt: Collision Avoidance Demo + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/collision_avoidance.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/mixed/collision_avoidance.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/mixed/sdk_speed_calculation.rst b/depthai_sdk/docs/source/samples/mixed/sdk_speed_calculation.rst new file mode 100644 index 000000000..65fc004e7 --- /dev/null +++ b/depthai_sdk/docs/source/samples/mixed/sdk_speed_calculation.rst @@ -0,0 +1,42 @@ +Speed Calculation Preview +========================= + +This example showcases the use of a callback function inside the visualizer to log speed and draw tracking information. + +.. include:: /includes/blocking_behavior.rst + + +Demo +#### +.. image:: /_static/images/demos/sdk_speed_calculation.gif + :alt: Speed Calculation Demo + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. 
image:: /_static/images/pipelines/speed_calculation.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + + .. literalinclude:: ../../../../examples/mixed/speed_calculation.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/mixed/sdk_switch_between_models.rst b/depthai_sdk/docs/source/samples/mixed/sdk_switch_between_models.rst new file mode 100644 index 000000000..60182f5e6 --- /dev/null +++ b/depthai_sdk/docs/source/samples/mixed/sdk_switch_between_models.rst @@ -0,0 +1,34 @@ +Switch Between Models +===================== + +This example shows how to switch between models on the fly. It uses script node to alter pipeline flow (either to use the yolo model or the mobilenet model). + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/switch_between_models.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/mixed/switch_between_models.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/mixed/sdk_sync_multiple_outputs.rst b/depthai_sdk/docs/source/samples/mixed/sdk_sync_multiple_outputs.rst new file mode 100644 index 000000000..5c67ee081 --- /dev/null +++ b/depthai_sdk/docs/source/samples/mixed/sdk_sync_multiple_outputs.rst @@ -0,0 +1,41 @@ +Sync Multiple Outputs +===================== + +This example shows how to apply software syncing to different outputs of the OAK device. In this example, the color stream is synced with two NeuralNetworks and passthrough. + + +.. include:: /includes/blocking_behavior.rst + +Demo +#### + +.. 
image:: /_static/images/demos/sdk_sync_multiple_outputs.png + :alt: Sync Multiple Outputs Demo + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/sync_multiple_outputs.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__. + + .. literalinclude:: ../../../../examples/mixed/sync_multiple_outputs.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/recording/SDK_encode.rst b/depthai_sdk/docs/source/samples/recording/SDK_encode.rst new file mode 100644 index 000000000..bc41b9da9 --- /dev/null +++ b/depthai_sdk/docs/source/samples/recording/SDK_encode.rst @@ -0,0 +1,37 @@ +Encode Multiple Streams +======================= + +This example showcases how to encode video from the camera and save it to a file. Possible encodings are: ``H264``, ``H265`` and ``MJPEG``. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/encode.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + + + .. literalinclude:: ../../../../examples/recording/encode.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/recording/SDK_encoder_preview.rst b/depthai_sdk/docs/source/samples/recording/SDK_encoder_preview.rst new file mode 100644 index 000000000..ef5b43953 --- /dev/null +++ b/depthai_sdk/docs/source/samples/recording/SDK_encoder_preview.rst @@ -0,0 +1,36 @@ +Preview Encoder +=============== + +This example shows how to use the callback function to write MJPEG encoded frames from color camera to a file. + +..
include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/encoder_preview.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + + .. literalinclude:: ../../../../examples/recording/encoder_preview.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/recording/SDK_mcap_record.rst b/depthai_sdk/docs/source/samples/recording/SDK_mcap_record.rst new file mode 100644 index 000000000..b99fc5dca --- /dev/null +++ b/depthai_sdk/docs/source/samples/recording/SDK_mcap_record.rst @@ -0,0 +1,31 @@ +MCAP Recording +============== + +This example showcases the use of SDK to save to MCAP file format. The MCAP file contains color as well as both left and right mono cameras and their inferred depth map. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + + + .. literalinclude:: ../../../../examples/recording/mcap_record.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/recording/SDK_mcap_record_imu.rst b/depthai_sdk/docs/source/samples/recording/SDK_mcap_record_imu.rst new file mode 100644 index 000000000..bd7c433c7 --- /dev/null +++ b/depthai_sdk/docs/source/samples/recording/SDK_mcap_record_imu.rst @@ -0,0 +1,30 @@ +MCAP IMU Recording +================== + +This example showcases how to record IMU data along with depth and save both in an MCAP file. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + + +Source Code +########### + +.. tabs:: + + .. 
tab:: Python + + Also `available on GitHub `_. + + + .. literalinclude:: ../../../../examples/recording/mcap_record_imu.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/recording/SDK_recording_duration.rst b/depthai_sdk/docs/source/samples/recording/SDK_recording_duration.rst new file mode 100644 index 000000000..c6ae2a377 --- /dev/null +++ b/depthai_sdk/docs/source/samples/recording/SDK_recording_duration.rst @@ -0,0 +1,36 @@ +Hardcode Recording Duration +=========================== + +This example shows how to record a video for a fixed duration of time. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/recording_duration.png + :alt: Pipeline graph + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + + + .. literalinclude:: ../../../../examples/recording/recording_duration.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/recording/SDK_rosbag_record.rst b/depthai_sdk/docs/source/samples/recording/SDK_rosbag_record.rst new file mode 100644 index 000000000..15ecae853 --- /dev/null +++ b/depthai_sdk/docs/source/samples/recording/SDK_rosbag_record.rst @@ -0,0 +1,37 @@ +ROSBAG Recording +================ + +This example showcases the use of SDK to save color, mono, depth and IMU data to a ROSBAG file. This can be useful for recording data for later use, or for testing purposes. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/rosbag_record.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + + .. 
literalinclude:: ../../../../examples/recording/rosbag_record.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/recording/SDK_stereo_record.rst b/depthai_sdk/docs/source/samples/recording/SDK_stereo_record.rst new file mode 100644 index 000000000..b45a6bca0 --- /dev/null +++ b/depthai_sdk/docs/source/samples/recording/SDK_stereo_record.rst @@ -0,0 +1,36 @@ +Stereo Recording +================ + +This example shows how to record disparity map to a file. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/stereo_record.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + + .. literalinclude:: ../../../../examples/recording/stereo_record.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/replay/SDK_counter.rst b/depthai_sdk/docs/source/samples/replay/SDK_counter.rst new file mode 100644 index 000000000..e79575a60 --- /dev/null +++ b/depthai_sdk/docs/source/samples/replay/SDK_counter.rst @@ -0,0 +1,39 @@ +People Counter on Video Replay +============================== + +This example shows how to run the people counter pipeline on a video file. + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_counter.gif + :alt: Counter demo + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/counter.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/replay/counter.py + :language: python + :linenos: + +.. 
include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/replay/SDK_looped_replay.rst b/depthai_sdk/docs/source/samples/replay/SDK_looped_replay.rst new file mode 100644 index 000000000..69551aeb2 --- /dev/null +++ b/depthai_sdk/docs/source/samples/replay/SDK_looped_replay.rst @@ -0,0 +1,39 @@ +Looped Replay +============= + + +This example shows how to run replay in a loop. This means the device won't close when the replay file ends. + + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/looped_replay.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + + .. literalinclude:: ../../../../examples/replay/looped_replay.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/replay/SDK_people_tracker.rst b/depthai_sdk/docs/source/samples/replay/SDK_people_tracker.rst new file mode 100644 index 000000000..276e1f442 --- /dev/null +++ b/depthai_sdk/docs/source/samples/replay/SDK_people_tracker.rst @@ -0,0 +1,37 @@ +People Tracker on Video Replay +============================== + +This example shows how to run the people tracker pipeline on a video file. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/people_tracker.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + + .. literalinclude:: ../../../../examples/replay/people-tracker.py + :language: python + :linenos: + +.. 
include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/replay/SDK_photo_download.rst b/depthai_sdk/docs/source/samples/replay/SDK_photo_download.rst new file mode 100644 index 000000000..9f01e9a3c --- /dev/null +++ b/depthai_sdk/docs/source/samples/replay/SDK_photo_download.rst @@ -0,0 +1,40 @@ +Face Detection Inference on Downloaded Image +============================================ + +This example shows how to run the face detection neural network model on a downloaded image from a specified url. + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_photo_download.png + :alt: Photo Download Demo + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/photo_download.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + + .. literalinclude:: ../../../../examples/replay/photo-download.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/replay/SDK_youtube_download.rst b/depthai_sdk/docs/source/samples/replay/SDK_youtube_download.rst new file mode 100644 index 000000000..1e510dfc4 --- /dev/null +++ b/depthai_sdk/docs/source/samples/replay/SDK_youtube_download.rst @@ -0,0 +1,36 @@ +Vehicle Detection on a Youtube Video +==================================== + +This example shows how to run the vehicle detection neural network model on a downloaded Youtube video. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/youtube_download.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. 
literalinclude:: ../../../../examples/replay/youtube-download.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/streaming/SDK_ros_publishing.rst b/depthai_sdk/docs/source/samples/streaming/SDK_ros_publishing.rst new file mode 100644 index 000000000..aa2faa53e --- /dev/null +++ b/depthai_sdk/docs/source/samples/streaming/SDK_ros_publishing.rst @@ -0,0 +1,30 @@ +ROS Publishing +============== + +This example shows how to use DepthAI SDK to create a ROS Publisher for left, right, color and IMU streams. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/streaming/ros_publishing.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/trigger_action/SDK_custom_action.rst b/depthai_sdk/docs/source/samples/trigger_action/SDK_custom_action.rst new file mode 100644 index 000000000..e6336fe06 --- /dev/null +++ b/depthai_sdk/docs/source/samples/trigger_action/SDK_custom_action.rst @@ -0,0 +1,36 @@ +Custom Trigger Action +===================== + +This example shows how to set custom action to be triggered when a certain event occurs. +In this case, we will trigger an action when a person is detected in the frame. The action will save the exact frame to a file. + +.. include:: /includes/blocking_behavior.rst + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/custom_action.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. 
literalinclude:: ../../../../examples/trigger_action/custom_action.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/trigger_action/SDK_custom_trigger.rst b/depthai_sdk/docs/source/samples/trigger_action/SDK_custom_trigger.rst new file mode 100644 index 000000000..f5fb6aab5 --- /dev/null +++ b/depthai_sdk/docs/source/samples/trigger_action/SDK_custom_trigger.rst @@ -0,0 +1,36 @@ +Custom Trigger +============== + +This example shows how to set custom trigger condition in DepthAI SDK. The trigger condition returns a boolean value if the condition is met. +In this case the trigger will start a recording of disparity stream when all depth values are below 1 meter. + +.. include:: /includes/blocking_behavior.rst + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/custom_trigger.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/trigger_action/custom_trigger.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/trigger_action/SDK_person_record.rst b/depthai_sdk/docs/source/samples/trigger_action/SDK_person_record.rst new file mode 100644 index 000000000..7e3fd979d --- /dev/null +++ b/depthai_sdk/docs/source/samples/trigger_action/SDK_person_record.rst @@ -0,0 +1,35 @@ +Person Record +============= + +This example shows how to set up a trigger with a RecordAction to record both color and disparity frames when a condition is met. + +.. include:: /includes/blocking_behavior.rst + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/person_record.png + :alt: Pipeline graph + + + +Source Code +########### + +.. 
tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/trigger_action/person_record.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/visualizer/SDK_visualizer.rst b/depthai_sdk/docs/source/samples/visualizer/SDK_visualizer.rst new file mode 100644 index 000000000..98171ce31 --- /dev/null +++ b/depthai_sdk/docs/source/samples/visualizer/SDK_visualizer.rst @@ -0,0 +1,36 @@ +Visualizer Demo +=============== + +This example shows how to use the visualizer component to display the detection results and configure the style of text and tracker. + +.. include:: /includes/blocking_behavior.rst + + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. image:: /_static/images/pipelines/visualizer.png + :alt: Pipeline graph + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + + .. literalinclude:: ../../../../examples/visualizer/visualizer.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/samples/visualizer/SDK_visualizer_callback.rst b/depthai_sdk/docs/source/samples/visualizer/SDK_visualizer_callback.rst new file mode 100644 index 000000000..db759ef24 --- /dev/null +++ b/depthai_sdk/docs/source/samples/visualizer/SDK_visualizer_callback.rst @@ -0,0 +1,41 @@ +Visualizer Callback Function +============================ + +This example demonstrates the use of a callback function to customize the visualization of detection results. + +.. include:: /includes/blocking_behavior.rst + +Demo +#### +.. image:: /_static/images/demos/sdk_visualizer_callback.png + :alt: Visualizer Callback Demo + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Pipeline +######## + +.. 
image:: /_static/images/pipelines/visualizer_callback.png + :alt: Pipeline graph + + + +Source Code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `_. + + .. literalinclude:: ../../../../examples/visualizer/visualizer_callback.py + :language: python + :linenos: + + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/depthai_sdk/docs/source/tutorials/code_samples.rst b/depthai_sdk/docs/source/tutorials/code_samples.rst new file mode 100644 index 000000000..d7207dbe0 --- /dev/null +++ b/depthai_sdk/docs/source/tutorials/code_samples.rst @@ -0,0 +1,97 @@ +Code Samples +============ + +.. toctree:: + :hidden: + :glob: + + ../samples/CameraComponent/* + ../samples/mixed/* + ../samples/IMUComponent/* + ../samples/NNComponent/* + ../samples/PointcloudComponent/* + ../samples/recording/* + ../samples/replay/* + ../samples/StereoComponent/* + ../samples/streaming/* + ../samples/trigger_action/* + ../samples/visualizer/* + +Code samples are used for automated testing. They are also a great starting point for the DepthAI SDK, as different component functionalities +are presented with code. + + +.. rubric:: ColorCamera + +- :ref:`FFC Camera Visualization` - Preview FFC Cameras +- :ref:`Camera Control` - Demonstrates RGB camera control from the host +- :ref:`Camera Preview` - Preview color, right, left and depth frames +- :ref:`Camera Control with NN` - Control camera (focus, exposure) with NN detections +- :ref:`Mono Camera Preview` - Preview mono cameras with manual 400p resolution +- :ref:`Preview All Cameras` - Preview all cameras connected to the OAK device +- :ref:`RGB and Mono Preview` - Preview RGB and mono cameras +- :ref:`Camera Rotated Preview` - Demonstrates how to rotate the camera previews + +.. 
rubric:: Mixed +- :ref:`API Interoperability Example` - Demonstrates interoperability between the DepthAI API and the SDK +- :ref:`Car Tracking Example` - Demonstrates how to run inference on a pre-saved video +- :ref:`Collision Avoidance` - Demonstrates how to run collision avoidance +- :ref:`Speed Calculation Preview` - Demonstrates how to calculate speed of detected objects in the frame +- :ref:`Switch Between Models` - Demonstrates how to switch between models +- :ref:`Sync Multiple Outputs` - Demonstrates how to sync multiple outputs + +.. rubric:: IMU +- :ref:`IMU Demonstration` - Demonstrates how to use and display the IMU +- :ref:`IMU Rerun Demonstration` - Demonstrates how to use and display the IMU in Rerun + +.. rubric:: NN +- :ref:`Age-Gender Inference` - Demonstrates age-gender inference +- :ref:`Custom Decode Function` - Demonstrates custom decoding function +- :ref:`Deeplabv3 Person Segmentation` - Demonstrates Deeplabv3 person segmentation +- :ref:`Emotion Recognition` - Demonstrates emotion recognition +- :ref:`Face Detection RGB` - Run face detection on RGB camera +- :ref:`Face Detection Mono` - Run face detection on mono camera +- :ref:`Human Pose Estimation` - Run human pose estimation inference +- :ref:`MobileNet Encoded` - Pass encoded color stream to the NN (MobileNet) +- :ref:`Neural Network Component` - Run color camera stream through NN (YoloV7) +- :ref:`Object Tracking` - Tracking objects in the frame +- :ref:`Roboflow Integration` - Demonstrates how to use the Roboflow platform to train a custom model +- :ref:`Spatial Detection` - Perform spatial detection with a MobileNet model +- :ref:`Yolo SDK` - Run YoloV3 inference on the color camera stream + +.. rubric:: Pointcloud +- :ref:`Pointcloud Demo` - Preview pointcloud with rerun viewer + +..
rubric:: Recording +- :ref:`Encode Multiple Streams` - Demonstrates how to encode multiple (color, left, right) streams and save them to a file +- :ref:`Preview Encoder` - Record color camera stream and save it as mjpeg +- :ref:`MCAP Recording` - Record color, left, right and depth streams and save them to a MCAP +- :ref:`MCAP IMU Recording` - Record IMU and depth streams and save them to a MCAP +- :ref:`Hardcode Recording Duration` - Record color camera stream for a specified duration +- :ref:`ROSBAG Recording` - Record IMU, left, right and depth streams and save them to a ROSBAG +- :ref:`Stereo Recording` - Records disparity stream + +.. rubric:: Replay +- :ref:`People Counter on Video Replay` - Run people counter on a pre-saved video +- :ref:`People Tracker on Video Replay` - Run people tracker on a pre-saved video +- :ref:`Face Detection Inference on Downloaded Image` - Run face detection on a downloaded image +- :ref:`Vehicle Detection on a Youtube Video` - Run vehicle detection on a Youtube video stream +- :ref:`Looped Replay` - Replay a pre-saved video in a loop + +.. rubric:: Stereo +- :ref:`Stereo Preview` - Display WLS filtered disparity map +- :ref:`Auto IR Brightness` - Demonstrates the use of auto IR brightness function +- :ref:`Stereo Control` - Demonstrates stereo control (median filter, decimation factor, confidence threshold) from the host +- :ref:`Stereo Encoding` - Demonstrates how to encode stereo stream and visualize it + +.. rubric:: Streaming +- :ref:`ROS Publishing` - Publish color, left, right and IMU streams to ROS + +.. rubric:: Trigger Action +- :ref:`Custom Trigger Action` - Demonstrates how to set a custom trigger action +- :ref:`Custom Trigger` - Demonstrates how to set a custom trigger +- :ref:`Person Record` - Demonstrates how to record a person when a person is detected + +.. 
rubric:: Visualizer +- :ref:`Visualizer Demo` - Demonstrates how to use the visualizer +- :ref:`Visualizer Callback Function` - Demonstrates how to set the visualizer callback function diff --git a/depthai_sdk/examples/recording/recording_duration.py b/depthai_sdk/examples/recording/recording_duration.py new file mode 100644 index 000000000..65fe7e21e --- /dev/null +++ b/depthai_sdk/examples/recording/recording_duration.py @@ -0,0 +1,16 @@ +from depthai_sdk import OakCamera, RecordType  # NOTE(review): RecordType is unused in this script — confirm before removing +import time + +with OakCamera() as oak:  # 'with' ensures the device is cleaned up when the block exits + color = oak.create_camera('color', resolution='1080P', fps=10, encode='H265') + left = oak.create_camera('left', resolution='800p', fps=10, encode='H265') + right = oak.create_camera('right', resolution='800p', fps=10, encode='H265') + + # Sync & save all three encoded streams (color, left, right) to ./record + oak.record([color.out.encoded, left.out.encoded, right.out.encoded], './record') + oak.start() + start_time = time.monotonic()  # monotonic clock: unaffected by system clock changes + while oak.running(): + if time.monotonic() - start_time > 5:  # hardcoded recording duration of ~5 seconds + break + oak.poll()  # NOTE(review): presumably services device queues/callbacks each iteration — confirm \ No newline at end of file