docker-compose.yml
version: '3.9'
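
# Reusable anchor for GPU access: the NVIDIA device nodes are passed through
# directly and GPU 0 is reserved via the Compose device reservation syntax
# (the device paths below assume a single-GPU host).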
x-gpu-base-service: &gpu_service
  #runtime: nvidia
  privileged: true
  devices:
    - /dev/nvidia0:/dev/nvidia0
    - /dev/nvidiactl:/dev/nvidiactl
    - /dev/nvidia-caps:/dev/nvidia-caps
    - /dev/nvidia-modeset:/dev/nvidia-modeset
    - /dev/nvidia-uvm:/dev/nvidia-uvm
    - /dev/nvidia-uvm-tools:/dev/nvidia-uvm-tools
  deploy:
    resources:
      reservations:
        devices:
          - driver: nvidia
            device_ids: ['0']
            capabilities: [gpu]
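
# Shared defaults for every service: the image built from ./service, the host
# port mapping (host 7860 -> container port 7680), bind-mounted volumes, and
# the restart policy.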
x-base_service: &base_service
  user: "${UID:-0}:${GID:-0}"
  #network_mode: "host"
  ports:
    - "7860:7680"
  build:
    context: ./service
    args:
      # Compile time args
      pyver: "3.10"
      pyimage: python:3.10-slim
      #XFORMERS_COMMAND: /bin/bash /docker/install-container-dep.sh --upgrade-strategy only-if-needed /docker/xformers-*.whl
      XFORMERS_COMMAND: /bin/bash /docker/install-container-dep.sh xformers==0.0.20
      #TORCH_COMMAND: /bin/bash /docker/install-container-dep.sh /docker/tensorflow-*.whl /docker/torch-*.whl /docker/torchvision-*.whl /docker/torchaudio-*.whl
      TORCH_COMMAND: /bin/bash /docker/install-container-dep.sh torch==2.0.1+cu118 torchvision==0.15.2+cu118 torchaudio==2.0.1+cu118
      #TENSORFLOW_COMMAND: /bin/bash /docker/install-container-dep.sh /docker/tensorflow-*.whl
      TENSORFLOW_COMMAND: /bin/bash /docker/install-container-dep.sh tensorflow==2.12.0
      # General configuration
      PIP_REPOSITORY: "https://download.pytorch.org/whl/cu118"
      PYTORCH_CUDA_ALLOC_CONF: "garbage_collection_threshold:0.9,max_split_size_mb:256"
      TRITON_VERSION: "2.0.0"
      DEEPSPEED_VERSION: "0.9.2"
      CUDNN_VERSION: "8.6.0.163"
      CUDA_VERSION: cuda-11-8
      CUDA_DRIVERS: cuda-drivers-535
      CUDA_KEYRING: https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.1-1_all.deb
      DS_BUILD_OPS: 1
      TORCH_CUDA_ARCH_LIST: 7.5+PTX
      NVCC_FLAGS: --use_fast_math
      JAX: False
      TPU: False
      DEEPSPEED: False
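  # Host mounts: ./data is the persistent working directory (the services point
  # XDG_CACHE_HOME at /data/.cache); the X11 socket is shared so DISPLAY
  # forwarding works.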
  volumes:
    - &v1 ./data:/data
    - &v2 /tmp/.X11-unix:/tmp/.X11-unix
  deploy:
    restart_policy:
      delay: 5s
      max_attempts: 10
      window: 120s

name: kohya-docker
services:
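  # Main training service, enabled with the "kohya" profile. Note that "<<" is
  # a shallow YAML merge: only one of the two "deploy" mappings survives (per
  # standard merge-key semantics the first alias, *base_service, wins), but GPU
  # access is still provided by the privileged/devices settings above.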
  kohya: &kohya_service
    <<: [*base_service, *gpu_service]
    profiles: ["kohya"]
    environment:
      - TF_ENABLE_ONEDNN_OPTS=1
      - XDG_CACHE_HOME=/data/.cache
      - ACCELERATE=False
      - DISPLAY=unix$DISPLAY
      - SAFETENSORS_FAST_GPU=1
      # - RUN_ARGS=/koyah_ss/kohya_gui.py --listen 0.0.0.0 --server_port 7680
      - RUN_ARGS=/koyah_ss/gui.sh --listen 0.0.0.0 --server_port 7680
      - RUNNER=/docker/run.sh
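
  # Debug variant without the GPU anchor: attaches stdin/tty and swaps the
  # runner for /docker/debug.sh, presumably an interactive debug entrypoint.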
  kohya_debug:
    <<: [*base_service]
    profiles: ["kohya_debug"]
    stdin_open: true
    tty: true
    environment:
      - RUNNER=/docker/debug.sh
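
# Usage sketch (assumes Docker Compose v2 with profile support):
#   docker compose --profile kohya up --build
#   docker compose --profile kohya_debug run --rm kohya_debug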