diff --git a/configs/mmaction/video-recognition/video-recognition_torchscript.py b/configs/mmaction/video-recognition/video-recognition_torchscript.py
new file mode 100644
index 0000000000..5f11330943
--- /dev/null
+++ b/configs/mmaction/video-recognition/video-recognition_torchscript.py
@@ -0,0 +1,7 @@
+_base_ = [
+    '../../_base_/torchscript_config.py',
+    '../../_base_/backends/torchscript.py'
+]
+
+ir_config = dict(input_shape=None)
+codebase_config = dict(type='mmaction', task='VideoRecognition')
diff --git a/docs/en/01-how-to-build/build_from_source.md b/docs/en/01-how-to-build/build_from_source.md
index e06a5c6eb9..cf47af015e 100644
--- a/docs/en/01-how-to-build/build_from_source.md
+++ b/docs/en/01-how-to-build/build_from_source.md
@@ -40,6 +40,7 @@ Please visit the following links to find out how to build MMDeploy according to
 
 - [Linux-x86_64](linux-x86_64.md)
 - [Windows](windows.md)
+- [MacOS](macos-arm64.md)
 - [Android-aarch64](android.md)
 - [NVIDIA Jetson](jetsons.md)
 - [SNPE](snpe.md)
diff --git a/docs/en/04-supported-codebases/mmaction2.md b/docs/en/04-supported-codebases/mmaction2.md
index 8f0d5a04e2..42c9f51b9b 100644
--- a/docs/en/04-supported-codebases/mmaction2.md
+++ b/docs/en/04-supported-codebases/mmaction2.md
@@ -186,5 +186,7 @@ Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Inter
 
 | Model | TorchScript | ONNX Runtime | TensorRT | ncnn | PPLNN | OpenVINO |
 | :----------------------------------------------------------------------------------------- | :---------: | :----------: | :------: | :--: | :---: | :------: |
-| [TSN](https://github.com/open-mmlab/mmaction2/tree/main/configs/recognition/tsn) | N | Y | Y | N | N | N |
-| [SlowFast](https://github.com/open-mmlab/mmaction2/tree/main/configs/recognition/slowfast) | N | Y | Y | N | N | N |
+| [TSN](https://github.com/open-mmlab/mmaction2/tree/main/configs/recognition/tsn) | Y | Y | Y | N | N | N |
+| [SlowFast](https://github.com/open-mmlab/mmaction2/tree/main/configs/recognition/slowfast) | Y | Y | Y | N | N | N |
+| [TSM](https://github.com/open-mmlab/mmaction2/tree/main/configs/recognition/tsm) | Y | Y | Y | N | N | N |
+| [X3D](https://github.com/open-mmlab/mmaction2/tree/main/configs/recognition/x3d) | Y | Y | Y | N | N | N |
diff --git a/docs/en/04-supported-codebases/mmseg.md b/docs/en/04-supported-codebases/mmseg.md
index 258d695348..0e50bcabee 100644
--- a/docs/en/04-supported-codebases/mmseg.md
+++ b/docs/en/04-supported-codebases/mmseg.md
@@ -218,7 +218,7 @@ Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Inter
 | [UPerNet](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/upernet)[\*](#static_shape) | N | Y | Y | N | N | N |
 | [DANet](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/danet) | ? | Y | Y | N | N | Y |
 | [Segmenter](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/segmenter)[\*](#static_shape) | N | Y | Y | Y | N | Y |
-| [SegFormer](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/segformer)[\*](#static_shape) | ? | Y | Y | N | N | Y |
+| [SegFormer](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/segformer)[\*](#static_shape) | Y | Y | Y | N | N | Y |
 | [SETR](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/setr) | ? | Y | N | N | N | Y |
 | [CCNet](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/ccnet) | ? | N | N | N | N | N |
 | [PSANet](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/psanet) | ? | N | N | N | N | N |
diff --git a/docs/zh_cn/01-how-to-build/build_from_source.md b/docs/zh_cn/01-how-to-build/build_from_source.md
index 597286897b..04c85655f1 100644
--- a/docs/zh_cn/01-how-to-build/build_from_source.md
+++ b/docs/zh_cn/01-how-to-build/build_from_source.md
@@ -43,6 +43,7 @@ git clone -b main git@github.com:open-mmlab/mmdeploy.git --recursive
 
 - [Linux-x86_64](linux-x86_64.md)
 - [Windows](windows.md)
+- [MacOS](macos-arm64.md)
 - [Android-aarch64](android.md)
 - [NVIDIA Jetson](jetsons.md)
 - [Qcom SNPE](snpe.md)
diff --git a/docs/zh_cn/04-supported-codebases/mmaction2.md b/docs/zh_cn/04-supported-codebases/mmaction2.md
index 3e7916ff85..6e31b86c96 100644
--- a/docs/zh_cn/04-supported-codebases/mmaction2.md
+++ b/docs/zh_cn/04-supported-codebases/mmaction2.md
@@ -189,5 +189,7 @@ for label_id, score in result:
 
 | Model | TorchScript | ONNX Runtime | TensorRT | ncnn | PPLNN | OpenVINO |
 | :----------------------------------------------------------------------------------------- | :---------: | :----------: | :------: | :--: | :---: | :------: |
-| [TSN](https://github.com/open-mmlab/mmaction2/tree/main/configs/recognition/tsn) | N | Y | Y | N | N | N |
-| [SlowFast](https://github.com/open-mmlab/mmaction2/tree/main/configs/recognition/slowfast) | N | Y | Y | N | N | N |
+| [TSN](https://github.com/open-mmlab/mmaction2/tree/main/configs/recognition/tsn) | Y | Y | Y | N | N | N |
+| [SlowFast](https://github.com/open-mmlab/mmaction2/tree/main/configs/recognition/slowfast) | Y | Y | Y | N | N | N |
+| [TSM](https://github.com/open-mmlab/mmaction2/tree/main/configs/recognition/tsm) | Y | Y | Y | N | N | N |
+| [X3D](https://github.com/open-mmlab/mmaction2/tree/main/configs/recognition/x3d) | Y | Y | Y | N | N | N |
diff --git a/docs/zh_cn/04-supported-codebases/mmseg.md b/docs/zh_cn/04-supported-codebases/mmseg.md
index d16dd97f68..b97cb526dd 100644
--- a/docs/zh_cn/04-supported-codebases/mmseg.md
+++ b/docs/zh_cn/04-supported-codebases/mmseg.md
@@ -222,7 +222,7 @@ cv2.imwrite('output_segmentation.png', img)
 | [UPerNet](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/upernet)[\*](#static_shape) | N | Y | Y | N | N | N |
 | [DANet](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/danet) | ? | Y | Y | N | N | Y |
 | [Segmenter](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/segmenter)[\*](#static_shape) | N | Y | Y | Y | N | Y |
-| [SegFormer](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/segformer)[\*](#static_shape) | ? | Y | Y | N | N | Y |
+| [SegFormer](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/segformer)[\*](#static_shape) | Y | Y | Y | N | N | Y |
 | [SETR](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/setr) | ? | Y | N | N | N | Y |
 | [CCNet](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/ccnet) | ? | N | N | N | N | N |
 | [PSANet](https://github.com/open-mmlab/mmsegmentation/tree/main/configs/psanet) | ? | N | N | N | N | N |
diff --git a/tests/regression/mmaction.yml b/tests/regression/mmaction.yml
index 2356244524..e6cfcaee32 100644
--- a/tests/regression/mmaction.yml
+++ b/tests/regression/mmaction.yml
@@ -19,7 +19,7 @@ globals:
   convert_image: &convert_image
     input_img: *video
     test_img: *video
-  backend_test: &default_backend_test True
+  backend_test: &default_backend_test False
   sdk:
     sdk_dynamic: &sdk_dynamic ""
 
@@ -29,6 +29,12 @@ onnxruntime:
     deploy_config: configs/mmaction/video-recognition/video-recognition_onnxruntime_static.py
     backend_test: *default_backend_test
 
+torchscript:
+  pipeline_torchscript_fp32: &pipeline_torchscript_fp32
+    convert_image: *convert_image
+    deploy_config: configs/mmaction/video-recognition/video-recognition_torchscript.py
+    backend_test: *default_backend_test
+
 tensorrt:
   pipeline_trt_2d_static_fp32: &pipeline_trt_2d_static_fp32
     convert_image: *convert_image
@@ -47,6 +53,7 @@ models:
     pipelines:
       - *pipeline_ort_static_fp32
       - *pipeline_trt_2d_static_fp32
+      - *pipeline_torchscript_fp32
 
   - name: SlowFast
     metafile: configs/recognition/slowfast/metafile.yml
@@ -55,3 +62,20 @@ models:
     pipelines:
       - *pipeline_ort_static_fp32
       - *pipeline_trt_3d_static_fp32
+      - *pipeline_torchscript_fp32
+
+  - name: TSM
+    metafile: configs/recognition/tsm/metafile.yml
+    model_configs:
+      - configs/recognition/tsm/tsm_imagenet-pretrained-mobilenetv2_8xb16-1x1x8-100e_kinetics400-rgb.py
+    pipelines:
+      - *pipeline_ort_static_fp32
+      - *pipeline_torchscript_fp32
+
+  - name: X3D
+    metafile: configs/recognition/x3d/metafile.yml
+    model_configs:
+      - configs/recognition/x3d/x3d_m_16x5x1_facebook-kinetics400-rgb.py
+    pipelines:
+      - *pipeline_ort_static_fp32
+      - *pipeline_torchscript_fp32
diff --git a/tests/regression/mmdet.yml b/tests/regression/mmdet.yml
index 3213111278..e6c61fd66d 100644
--- a/tests/regression/mmdet.yml
+++ b/tests/regression/mmdet.yml
@@ -361,3 +361,13 @@ models:
     pipelines:
       - *pipeline_seg_ort_dynamic_fp32
       - *pipeline_seg_openvino_dynamic_fp32
+
+  - name: RTMDetInst
+    metafile: configs/rtmdet/metafile.yml
+    model_configs:
+      - configs/rtmdet/rtmdet-ins_tiny_8xb32-300e_coco.py
+    pipelines:
+      - *pipeline_seg_ts_fp32
+      - *pipeline_seg_ort_dynamic_fp32
+      - *pipeline_seg_trt_dynamic_fp32
+      - *pipeline_seg_openvino_dynamic_fp32
diff --git a/tests/regression/mmseg.yml b/tests/regression/mmseg.yml
index 9827749133..d9152372d3 100644
--- a/tests/regression/mmseg.yml
+++ b/tests/regression/mmseg.yml
@@ -372,3 +372,12 @@ models:
       - *pipeline_trt_static_fp32_512x512
       - *pipeline_openvino_static_fp32_512x512
       - *pipeline_ncnn_static_fp32
+  - name: SegFormer
+    metafile: configs/segformer/metafile.yaml
+    model_configs:
+      - configs/segformer/segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py
+    pipelines:
+      - *pipeline_ts_fp32
+      - *pipeline_ort_static_fp32
+      - *pipeline_trt_static_fp32
+      - *pipeline_openvino_static_fp32
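
For anyone who wants to poke at the new deploy config locally, here is a minimal sketch (not part of the patch) that loads `configs/mmaction/video-recognition/video-recognition_torchscript.py` with mmengine and prints the merged fields. It assumes mmengine is installed and that it is run from the mmdeploy repository root; the two `_base_` files it inherits from are not shown in this diff, so the keys they contribute are an assumption.

```python
# Minimal sketch, not part of the patch: inspect what the new TorchScript
# deploy config resolves to once its `_base_` files are merged.
# Assumes mmengine is installed and the script runs from the mmdeploy repo root.
from mmengine import Config

deploy_cfg = Config.fromfile(
    'configs/mmaction/video-recognition/video-recognition_torchscript.py')

# Fields set directly in the new config file:
print(deploy_cfg.codebase_config)           # type='mmaction', task='VideoRecognition'
print(deploy_cfg.ir_config['input_shape'])  # None -> keep the model's own input shape

# Everything else (IR type, backend type, save file, ...) is inherited from
# ../../_base_/torchscript_config.py and ../../_base_/backends/torchscript.py,
# which are outside this diff.
print(deploy_cfg.ir_config)
print(deploy_cfg.backend_config)
```

The regression entries added in `tests/regression/mmaction.yml` reference this same file through their `deploy_config:` keys, so the TorchScript pipelines for TSN, SlowFast, TSM, and X3D all exercise this one config.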