diff --git a/docker/Dockerfile.gpu b/docker/Dockerfile.gpu
index 4e8a8085..8dd7a5d0 100644
--- a/docker/Dockerfile.gpu
+++ b/docker/Dockerfile.gpu
@@ -1,11 +1,13 @@
-FROM nvidia/cuda:12.2.0-runtime-ubi8 as base
+FROM nvidia/cuda:11.7.1-runtime-ubi8 as base
 
 ARG cnlpt_version
 
 RUN yum -y install python39 python39-pip
-RUN pip3.9 install cython torch
+RUN pip3.9 install cython
 RUN pip3.9 install cnlp-transformers==$cnlpt_version
+# pytorch can't find the cudnn library with our setup, so just point at it directly
+ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/python3.9/site-packages/nvidia/cudnn/lib/"
 
 WORKDIR /opt/cnlp/
 
 # this copy is to support the preload of train models in the downstream images
diff --git a/docker/MAINTAINER.md b/docker/MAINTAINER.md
index 48bf2a2a..722703c2 100644
--- a/docker/MAINTAINER.md
+++ b/docker/MAINTAINER.md
@@ -14,9 +14,11 @@ Pass `--help` to see all your options.
 
 ### Local Testing
 
 Use the `./build.py` script to build the image you care about,
-and then run something like the following, depending on your model:
-```shell
+and then run something like one of the following, depending on your model and processor:
+
+```
 docker run --rm -p 8000:8000 smartonfhir/cnlp-transformers:termexists-latest-cpu
+docker run --rm -p 8000:8000 --gpus all smartonfhir/cnlp-transformers:termexists-latest-gpu
 ```
 With that specific example of the `termexists` model, you could smoke test it like so: