Upgrade CI and prebuild to cu118 #2447

Merged (12 commits) on Sep 21, 2023
6 changes: 5 additions & 1 deletion .github/scripts/prepare_reg_test.py
@@ -112,8 +112,12 @@ def install_torch(torch_version):
tv = version.parse(torch_version)
if tv < version.parse('1.10.0'):
cuda_int = '111'
elif tv >= version.parse('1.13.0'):
elif tv < version.parse('1.13.0'):
cuda_int = '113'
elif tv < version.parse('2.0.0'):
cuda_int = '117'
else:
cuda_int = '118'

is_torch_v2 = tv >= version.parse('2.0.0')
if is_torch_v2:
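For reference, a minimal Python sketch (not part of the diff) of the full torch-to-CUDA mapping after this hunk; it assumes the script's `version` comes from the `packaging` package, as the call sites suggest:

# Sketch only: reproduces the branch logic above so the cu11x choice is easy to check.
from packaging import version

def cuda_suffix(torch_version: str) -> str:
    tv = version.parse(torch_version)
    if tv < version.parse('1.10.0'):
        return '111'            # torch < 1.10 -> CUDA 11.1 wheels
    elif tv < version.parse('1.13.0'):
        return '113'            # 1.10 <= torch < 1.13 -> CUDA 11.3 wheels
    elif tv < version.parse('2.0.0'):
        return '117'            # 1.13 <= torch < 2.0 -> CUDA 11.7 wheels
    return '118'                # torch >= 2.0 -> CUDA 11.8 wheels

assert cuda_suffix('1.13.1') == '117'
assert cuda_suffix('2.0.0') == '118'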
28 changes: 16 additions & 12 deletions .github/workflows/build.yml
@@ -268,12 +268,14 @@ jobs:
COLOR: ${{ steps.badge_status.conclusion == 'success' && 'green' || 'red' }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

build_cuda113_linux:
build_cuda118_linux:
needs: [build_cpu_model_convert, build_cpu_sdk, build_cuda117]
runs-on: [self-hosted, linux-3090]
container:
image: openmmlab/mmdeploy:ubuntu20.04-cuda11.3
image: openmmlab/mmdeploy:ubuntu20.04-cuda11.8
options: "--gpus=all --ipc=host"
volumes:
- /data2/pip-cache:/root/.cache/pip
steps:
- name: Checkout repository
uses: actions/checkout@v3
@@ -313,17 +315,17 @@ jobs:
if: always()
uses: RubbaBoy/[email protected]
with:
NAME: build_cuda113_linux
NAME: build_cuda118_linux
LABEL: 'build'
STATUS: ${{ steps.badge_status.conclusion == 'success' && 'passing' || 'failing' }}
COLOR: ${{ steps.badge_status.conclusion == 'success' && 'green' || 'red' }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

build_cuda113_windows:
build_cuda118_windows:
needs: [build_cpu_model_convert, build_cpu_sdk, build_cuda117]
runs-on: [self-hosted, win10-3080]
env:
BASE_ENV: cuda11.3-cudnn8.2-py3.8-torch1.10
BASE_ENV: mmdeploy-cuda11.8-torch2.0-py38
defaults:
run:
shell: powershell
@@ -356,18 +358,18 @@ jobs:
python -V
mkdir build
cd build
cmake .. -A x64 -T v142 `
cmake .. -A x64 -T v142,cuda=$env:CUDA_PATH `
-DMMDEPLOY_BUILD_TEST=ON `
-DMMDEPLOY_BUILD_SDK_CSHARP_API=ON `
-DMMDEPLOY_BUILD_SDK_PYTHON_API=ON `
-DMMDEPLOY_BUILD_SDK=ON `
-DMMDEPLOY_TARGET_DEVICES='cuda' `
-DMMDEPLOY_TARGET_BACKENDS='ort;trt' `
-DMMDEPLOY_CODEBASES='all' `
-Dpplcv_DIR="$env:PPLCV_DIR\pplcv-build\install\lib\cmake\ppl" `
-DOpenCV_DIR="$env:OPENCV_DIR\build\x64\vc15\lib" `
-Dpplcv_DIR="$env:pplcv_DIR" `
-DOpenCV_DIR="$env:OpenCV_DIR" `
-DTENSORRT_DIR="$env:TENSORRT_DIR" `
-DONNXRUNTIME_DIR="$env:ONNXRUNTIME_DIR" `
-DONNXRUNTIME_DIR="$env:ONNXRUNTIME_GPU_DIR" `
-DMMDEPLOY_BUILD_EXAMPLES=ON `
-DCUDNN_DIR="$env:CUDNN_DIR"
cmake --build . --config Release -- /m
@@ -382,21 +384,23 @@ jobs:
run: |
conda activate $env:TEMP_ENV
$env:path = "$pwd\build\bin\Release;" + $env:path
$env:path = "$env:ONNXRUNTIME_GPU_DIR\lib;" + $env:path
echo $env:path
.github\scripts\windows\test_full_pipeline.ps1 -Backend trt -Device cuda
- name: Clear temp env
if: always()
run: |
conda env remove --prefix "$env:TEMP_ENV"

badge_build_cuda113_windows:
needs: build_cuda113_windows
badge_build_cuda118_windows:
needs: build_cuda118_windows
if: always()
runs-on: ubuntu-20.04
steps:
- name: create badge
uses: RubbaBoy/[email protected]
with:
NAME: build_cuda113_windows
NAME: build_cuda118_windows
LABEL: 'build'
STATUS: ${{ needs.build_cuda113_windows.result == 'success' && 'passing' || needs.build_cuda113_windows.result }}
COLOR: ${{ needs.build_cuda113_windows.result == 'success' && 'green' || 'red' }}
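The notable change in the Windows job is the generator toolset: `-T v142,cuda=$env:CUDA_PATH` pins the CUDA toolkit CMake pairs with the VS 2019 toolset, and ONNX Runtime is switched to its GPU package via ONNXRUNTIME_GPU_DIR. A rough Python-subprocess sketch of that call (the trimmed-down flag set is illustrative, not the workflow's exact command; the two environment variables are assumed to be set by the runner):

import os
import subprocess

# Illustration only: mirrors the cmake invocation above.
cmake_args = [
    "cmake", "..", "-A", "x64",
    "-T", f"v142,cuda={os.environ['CUDA_PATH']}",               # select the CUDA 11.8 toolkit
    "-DMMDEPLOY_BUILD_SDK=ON",
    "-DMMDEPLOY_TARGET_DEVICES=cuda",
    "-DMMDEPLOY_TARGET_BACKENDS=ort;trt",
    f"-DONNXRUNTIME_DIR={os.environ['ONNXRUNTIME_GPU_DIR']}",   # GPU build of ONNX Runtime
]
subprocess.run(cmake_args, check=True, cwd="build")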
53 changes: 33 additions & 20 deletions .github/workflows/docker.yml
@@ -20,34 +20,47 @@ on:

jobs:
publish_docker_image:
runs-on: [self-hosted, linux-3090]
runs-on: ubuntu-latest
environment: 'prod'
env:
TAG_PREFIX: openmmlab/mmdeploy:ubuntu20.04-cuda11.3-mmdeploy
TAG_PREFIX: openmmlab/mmdeploy:ubuntu20.04-cuda11.8-mmdeploy
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Get mmdeploy version
if: startsWith(github.ref, 'refs/tags/') == false
- name: Check disk space
run: |
echo "MMDEPLOY_VERSION=main" >> $GITHUB_ENV
echo "TAG=$TAG_PREFIX" >> $GITHUB_ENV
- name: Get mmdeploy tag
if: startsWith(github.ref, 'refs/tags/') == true
df -h
ls /opt/hostedtoolcache
rm -rf ${GITHUB_WORKSPACE}/.git
rm -rf /opt/hostedtoolcache/go
rm -rf /opt/hostedtoolcache/node
rm -rf /opt/hostedtoolcache/Ruby
rm -rf /opt/hostedtoolcache/CodeQL
cat /proc/cpuinfo | grep -ic proc
free
df -h
df . -h
- name: Get docker info
run: |
export MMDEPLOY_VERSION=$(python3 -c "import sys; sys.path.append('mmdeploy');from version import __version__;print(__version__)")
echo $MMDEPLOY_VERSION
echo "TAG=${TAG_PREFIX}${MMDEPLOY_VERSION}" >> $GITHUB_ENV
echo "MMDEPLOY_VERSION=v$MMDEPLOY_VERSION" >> $GITHUB_ENV
- name: Build Docker image
continue-on-error: true
docker info
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push the latest Docker image
run: |
echo $MMDEPLOY_VERSION
export TAG=$TAG_PREFIX
echo "TAG=${TAG}" >> $GITHUB_ENV
echo $TAG
docker build docker/Release/ -t ${TAG} --no-cache --build-arg MMDEPLOY_VERSION=${MMDEPLOY_VERSION}
- name: Push Docker image
continue-on-error: true
docker build ./docker/Release/ -t ${TAG} --no-cache
docker push $TAG
- name: Push docker image with released tag
if: startsWith(github.ref, 'refs/tags/') == true
run: |
export MMDEPLOY_VERSION=$(python3 -c "import sys; sys.path.append('mmdeploy');from version import __version__;print(__version__)")
echo $MMDEPLOY_VERSION
echo $TAG
docker push $TAG
export RELEASE_TAG=${TAG_PREFIX}${MMDEPLOY_VERSION}
echo $RELEASE_TAG
docker tag $TAG $RELEASE_TAG
docker push $RELEASE_TAG
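A small Python sketch (not part of the workflow) of the tagging scheme the new docker.yml implements: every run pushes the moving TAG_PREFIX tag, and a release run additionally pushes a version-suffixed image; the version string used below is a made-up example.

TAG_PREFIX = "openmmlab/mmdeploy:ubuntu20.04-cuda11.8-mmdeploy"

def image_tags(mmdeploy_version: str, is_release: bool) -> list:
    """Tags the workflow would push for one run (sketch only)."""
    tags = [TAG_PREFIX]                                 # always pushed
    if is_release:                                      # only on refs/tags/* runs
        tags.append(TAG_PREFIX + mmdeploy_version)      # e.g. ...-mmdeploy<version>
    return tags

print(image_tags("1.2.0", is_release=True))   # "1.2.0" is a hypothetical version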
8 changes: 5 additions & 3 deletions .github/workflows/prebuild.yml
@@ -22,10 +22,11 @@ jobs:
if: inputs.run == true || ${{ github.event_name == 'push' }}
runs-on: [self-hosted, linux-3090]
container:
image: openmmlab/mmdeploy:manylinux2014_x86_64-cuda11.3
image: openmmlab/mmdeploy:manylinux2014_x86_64-cuda11.8
options: "--gpus=all --ipc=host"
volumes:
- /data2/actions-runner/prebuild:/__w/mmdeploy/prebuild
- /data2/pip-cache:/root/.cache/pip
steps:
- name: Checkout repository
uses: actions/checkout@v3
@@ -80,10 +81,11 @@ jobs:
if: inputs.run == true || ${{ github.event_name == 'push' }}
runs-on: [self-hosted, linux-3090]
container:
image: openmmlab/mmdeploy:build-ubuntu16.04-cuda11.3
image: openmmlab/mmdeploy:build-ubuntu16.04-cuda11.8
options: "--gpus=all --ipc=host"
volumes:
- /data2/actions-runner/prebuild:/__w/mmdeploy/prebuild
- /data2/pip-cache:/root/.cache/pip
steps:
- name: Checkout repository
uses: actions/checkout@v3
@@ -107,7 +109,7 @@ jobs:
cd pack
python ../tools/package_tools/generate_build_config.py --backend 'ort;trt' \
--system linux --output config.yml --device cuda --build-sdk --build-sdk-monolithic \
--sdk-dynamic-net --cxx11abi --onnxruntime-dir=$ONNXRUNTIME_GPU_DIR --cudnn-dir /usr
--sdk-dynamic-net --cxx11abi --onnxruntime-dir=$ONNXRUNTIME_GPU_DIR --cudnn-dir $CUDNN_DIR
python ../tools/package_tools/mmdeploy_builder.py --config config.yml
- name: Zip mmdeploy sdk
run: |
10 changes: 5 additions & 5 deletions .github/workflows/regression-test.yml
@@ -27,7 +27,7 @@ on:
required: true
description: 'Tested torch versions. Default is ["1.10.0"]'
type: string
default: "['1.10.0']"
default: "['2.0.0']"
models:
required: true
description: 'Tested model list, eg: "resnet yolov3". Default is "all".\r\n Example: resnet ssd yolov5 maskrcnn srcnn pointpillars dbnet crnn hrnet fastscnn slowfast RotatedRetinanet'
@@ -65,8 +65,8 @@ jobs:
timeout-minutes: 4320 # 72hours
environment: 'prod'
container:
image: openmmlab/mmdeploy:ubuntu20.04-cuda11.3
options: "--gpus=all --ipc=host"
image: openmmlab/mmdeploy:ubuntu20.04-cuda11.8
options: "--gpus=all --ipc=host -e NVIDIA_DRIVER_CAPABILITIES=all -e DISPLAY"
volumes:
- /data2/checkpoints:/__w/mmdeploy/mmdeploy_checkpoints
- /data2/benchmark:/__w/mmdeploy/data
@@ -173,7 +173,7 @@ jobs:
environment: 'prod'
runs-on: [self-hosted, win10-3080]
env:
BASE_ENV: cuda11.3-cudnn8.2-py3.8-torch1.10
BASE_ENV: mmdeploy-cuda11.8-torch2.0-py38
DATASET_DIR: D:\reg-test\data
REGRESSION_DIR: D:\reg-test\regression_log
CHECKPOINT_DIR: D:\reg-test\checkpoints
@@ -240,7 +240,7 @@ jobs:
echo "BACKENDS=$env:BACKENDS" >> $env:GITHUB_ENV
New-Item -Path build -ItemType Directory -Force
cd build
cmake .. -A x64 -T v142 `
cmake .. -A x64 -T v142,cuda=$env:CUDA_PATH `
-DMMDEPLOY_BUILD_TEST=OFF `
-DMMDEPLOY_BUILD_SDK_CSHARP_API=ON `
-DMMDEPLOY_BUILD_SDK_PYTHON_API=ON `
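The `torch_versions` default is now the string "['2.0.0']"; presumably (an assumption, since the consuming code is not shown in this hunk) it is parsed back into a list before driving the per-version install, along the lines of:

import ast

# Assumption: the workflow input arrives as a Python-style list literal inside a string.
torch_versions = ast.literal_eval("['2.0.0']")
for v in torch_versions:
    print(f"would install torch=={v}+cu118")   # cu118 per prepare_reg_test.py above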
File renamed without changes.
24 changes: 14 additions & 10 deletions docker/Base/Dockerfile
@@ -1,22 +1,24 @@
ARG CUDA_INT=113
ARG CUDA_INT=118

FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 AS cuda-118
FROM nvidia/cuda:11.3.0-cudnn8-devel-ubuntu20.04 AS cuda-113
FROM nvidia/cuda:10.2-cudnn8-devel-ubuntu18.04 AS cuda-102

FROM cuda-${CUDA_INT} AS final

ARG TORCH_VERSION=1.10.0
ARG TORCHVISION_VERSION=0.11.0
ARG TORCH_VERSION=2.0.0
ARG TORCHVISION_VERSION=0.15.1

# important dependencies
ARG OPENCV_VERSION==4.5.4.60
ARG PPLCV_VERSION=0.7.0

# backends
ARG ONNXRUNTIME_VERSION=1.8.1
ARG ONNXRUNTIME_VERSION=1.15.1
ARG PPLNN_VERSION=0.8.1
ARG NCNN_VERSION=20221128
ARG TENSORRT_VERSION=8.2.3.0
ARG NCNN_VERSION=20230816
ARG TENSORRT_VERSION=8.6.1.6
ARG OPENVINO_VERSION=2022.3.0

# tensorrt tar file url
ARG TENSORRT_URL

@@ -47,21 +49,23 @@ ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
RUN wget https://download.java.net/java/GA/jdk18/43f95e8614114aeaa8e8a5fcf20a682d/36/GPL/openjdk-18_linux-x64_bin.tar.gz &&\
tar xvf openjdk-18_linux-x64_bin.tar.gz && rm -rf openjdk-18_linux-x64_bin.tar.gz && \
wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz &&\
tar -xzvf onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz && rm onnxruntime-*.tgz &&\
tar -xzvf onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz &&\
wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}.tgz &&\
tar -xzvf onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}.tgz && rm onnxruntime-*.tgz &&\
wget https://github.com/Kitware/CMake/releases/download/v3.25.2/cmake-3.25.2-linux-x86_64.tar.gz &&\
tar -xzvf cmake-3.25.2-linux-x86_64.tar.gz && rm cmake-*.tar.gz && mv cmake-* cmake &&\
export CUDA_INT=$(echo $CUDA_VERSION | awk '{split($0, a, "."); print a[1]a[2]}') &&\
python3 -m pip install --no-cache-dir --upgrade pip setuptools wheel &&\
python3 -m pip install --no-cache-dir onnxruntime-gpu==${ONNXRUNTIME_VERSION} &&\
python3 -m pip install --no-cache-dir openvino openvino-dev[onnx] &&\
python3 -m pip install --no-cache-dir openvino openvino-dev[onnx]==${OPENVINO_VERSION} &&\
python3 -m pip install --no-cache-dir opencv-python==${OPENCV_VERSION} opencv-python-headless==${OPENCV_VERSION} opencv-contrib-python==${OPENCV_VERSION} &&\
python3 -m pip install --no-cache-dir torch==${TORCH_VERSION}+cu${CUDA_INT} torchvision==${TORCHVISION_VERSION}+cu${CUDA_INT} -f https://download.pytorch.org/whl/torch_stable.html

# create env
ENV JAVA_HOME=/root/workspace/jdk-18
ENV PATH=$JAVA_HOME/bin:/root/workspace/cmake/bin:$PATH
ENV ONNXRUNTIME_VERSION=${ONNXRUNTIME_VERSION}
ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}
ENV ONNXRUNTIME_GPU_DIR=/root/workspace/onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}
ENV LD_LIBRARY_PATH=${ONNXRUNTIME_DIR}/lib:$LD_LIBRARY_PATH

### install ppl.nn
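The CUDA_INT export above turns the image's CUDA_VERSION into the cuXXX wheel suffix used by the torch install line; a Python equivalent of that awk one-liner, for illustration only:

def cuda_int(cuda_version: str) -> str:
    # "11.8.0" -> "118", matching: awk '{split($0, a, "."); print a[1]a[2]}'
    major, minor = cuda_version.split(".")[:2]
    return major + minor

assert cuda_int("11.8.0") == "118"
# which makes the pip requirement resolve to e.g. torch==2.0.0+cu118
print(f"torch==2.0.0+cu{cuda_int('11.8.0')}")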
6 changes: 3 additions & 3 deletions docker/Release/Dockerfile
@@ -1,6 +1,6 @@
FROM openmmlab/mmdeploy:ubuntu20.04-cuda11.3
FROM openmmlab/mmdeploy:ubuntu20.04-cuda11.8

ARG MMDEPLOY_VERSION
ARG MMDEPLOY_VERSION=main

ENV BACKUP_LD_LIBRARY_PATH=$LD_LIBRARY_PATH
ENV LD_LIBRARY_PATH=/usr/local/cuda/compat:$LD_LIBRARY_PATH
@@ -20,7 +20,7 @@ RUN git clone --recursive -b $MMDEPLOY_VERSION --depth 1 https://github.com/open
python3 -m pip install -U openmim pycuda &&\
python3 -m mim install "mmcv>=2.0.0" &&\
python3 -m pip install -r requirements.txt &&\
python3 -m pip install -e .
python3 -m pip install -e . --user

ENV MMDeploy_DIR="/root/workspace/mmdeploy/build/install/lib/cmake/MMDeploy"
ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${BACKUP_LD_LIBRARY_PATH}"
11 changes: 6 additions & 5 deletions docker/prebuild/Dockerfile
@@ -5,19 +5,19 @@ ARG CUDA_URL
ARG CUDNN_URL
ARG TENSORRT_URL

ARG CUDA_VERSION=11.3
ARG CUDA_VERSION=11.8

# important dependencies
ARG OPENCV_VERSION=4.5.5
ARG PPLCV_VERSION=0.7.0

# backends
ARG ONNXRUNTIME_VERSION=1.8.1
ARG TENSORRT_VERSION=8.2.3.0
ARG ONNXRUNTIME_VERSION=1.15.1
ARG TENSORRT_VERSION=8.6.1.6

# torch
ARG TORCH_VERSION=1.10.0
ARG TORCHVISION_VERSION=0.11.0
ARG TORCH_VERSION=2.0.0
ARG TORCHVISION_VERSION=0.15.0

ARG TOOLSET_VERSION=7

@@ -105,6 +105,7 @@ RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Mini
/opt/conda/bin/conda create -n mmdeploy-3.8 python=3.8 -y &&\
/opt/conda/bin/conda create -n mmdeploy-3.9 python=3.9 -y &&\
/opt/conda/bin/conda create -n mmdeploy-3.10 python=3.10 -y &&\
/opt/conda/bin/conda create -n mmdeploy-3.11 python=3.11 -y &&\
export CUDA_INT=$(echo $CUDA_VERSION | awk '{split($0, a, "."); print a[1]a[2]}') &&\
/opt/conda/bin/conda create -n torch${TORCH_VERSION} python=3.8 -y &&\
/opt/conda/envs/mmdeploy-3.6/bin/pip install --no-cache-dir setuptools wheel pyyaml packaging &&\