Skip to content

Commit

Permalink
Merge pull request #1005 from Lanture1064/dev
Browse files Browse the repository at this point in the history
feat: add llama-factory chart
  • Loading branch information
bjwswang authored Apr 11, 2024
2 parents 07ff0ca + f232984 commit 12e790b
Show file tree
Hide file tree
Showing 7 changed files with 146 additions and 3 deletions.
15 changes: 15 additions & 0 deletions deploy/llama-factory/Chart.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Helm chart metadata for the llama-factory component.
apiVersion: v2
type: application
name: llama-factory
description: llama-factory
# chart version (SemVer) and upstream application version; quoted so
# tooling never re-types the version string
version: 0.1.1
appVersion: "0.1.0"

keywords:
  - llama-factory
  - model tune
sources:
  - https://github.com/huangqg/LLaMA-Factory
maintainers:
  - name: huangqg
    url: https://github.com/huangqg
1 change: 1 addition & 0 deletions deploy/llama-factory/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# llama-factory
43 changes: 43 additions & 0 deletions deploy/llama-factory/templates/deployment.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
# Deployment running the llama-factory web UI (single replica, GPU workload).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: llama-factory
  namespace: {{ .Release.Namespace }}
  labels:
    app: llama-factory
spec:
  replicas: 1
  selector:
    matchLabels:
      app: llama-factory
  template:
    metadata:
      labels:
        app: llama-factory
    spec:
      containers:
      - name: llama-factory
        # quoted so an empty/boolean-looking value still renders as a string
        image: "{{ .Values.image }}"
        imagePullPolicy: "{{ .Values.imagePullPolicy }}"
        ports:
        # Gradio default UI port; must match the Service targetPort
        - containerPort: 7860
          protocol: TCP
        # resources map rendered from values; nindent 10 matches this
        # key's 8-space indentation (children land at column 10)
        resources: {{ toYaml .Values.resources | nindent 10 }}
        volumeMounts:
        # HuggingFace model cache
        - name: model-volume
          mountPath: /root/.cache/huggingface
        # training datasets
        - name: data-volume
          mountPath: /app/data
        # training outputs / checkpoints
        - name: output-volume
          mountPath: /app/output
      # hostPath volumes: paths come straight from values.yaml and only
      # exist on the node they point at — single-node / dev setups
      volumes:
      - name: model-volume
        hostPath:
          path: "{{ .Values.volume.model }}"
      - name: data-volume
        hostPath:
          path: "{{ .Values.volume.data }}"
      - name: output-volume
        hostPath:
          path: "{{ .Values.volume.output }}"
      terminationGracePeriodSeconds: 30
43 changes: 43 additions & 0 deletions deploy/llama-factory/templates/ingress.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
{{- if .Values.ingress.enable }}
# Ingress exposing the llama-factory UI at portal.<ingressDomain>.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: llama-factory
  namespace: {{ .Release.Namespace }}
  labels:
    # quoted: templated values must always render as strings
    ingress-lb: "{{ .Values.ingress.ingressClassName }}"
  annotations:
    description: llama-factory-ingress
    displayName: llama-factory-ingress
    httpSend: /
    ingress-lb: "{{ .Values.ingress.ingressClassName }}"
    # NOTE(review): this annotation is deprecated in networking.k8s.io/v1
    # in favor of spec.ingressClassName — kept because the target
    # controller apparently selects on it; confirm before removing
    kubernetes.io/ingress.class: "{{ .Values.ingress.ingressClassName }}"
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_cache static-cache;
      proxy_cache_valid 404 10m;
      proxy_cache_use_stale error timeout updating http_404 http_500 http_502 http_503 http_504;
      proxy_cache_bypass $http_x_purge;
      add_header X-Cache-Status $upstream_cache_status; # check X-Cache-Status to see if it's HIT
      rewrite ^/(llama-factory-apis)(/|$)(.*)$ /$3 break;
    nginx.ingress.kubernetes.io/enable-access-log: "false"
    nginx.ingress.kubernetes.io/enable-rewrite-log: "false"
    nginx.ingress.kubernetes.io/load-balance: round_robin
    nginx.ingress.kubernetes.io/proxy-body-size: ""
    nginx.ingress.kubernetes.io/proxy-buffering: "on"
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60"
    nginx.ingress.kubernetes.io/server-alias: ""
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/upstream-vhost: $host
spec:
  rules:
  - host: portal.{{ .Values.ingress.ingressDomain }}
    http:
      paths:
      - path: /
        pathType: ImplementationSpecific
        backend:
          service:
            name: llama-factory
            port:
              number: 7860
{{- end }}
17 changes: 17 additions & 0 deletions deploy/llama-factory/templates/service.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# ClusterIP Service fronting the llama-factory Deployment on port 7860.
apiVersion: v1
kind: Service
metadata:
  name: llama-factory
  namespace: {{ .Release.Namespace }}
  labels:
    app: llama-factory
spec:
  type: ClusterIP
  sessionAffinity: None
  selector:
    app: llama-factory
  ports:
    # forwards to the container's Gradio UI port
    - name: http
      protocol: TCP
      port: 7860
      targetPort: 7860
23 changes: 23 additions & 0 deletions deploy/llama-factory/values.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Default values for the llama-factory chart.

# container image and pull policy for the llama-factory workload
image: kubeagi/llama-factory:v0.6.1
imagePullPolicy: IfNotPresent

# resource requests/limits; quoted so cpu/gpu counts stay strings
resources:
  limits:
    cpu: "4"
    memory: 12Gi
    nvidia.com/gpu: "1"
  requests:
    cpu: "1"
    memory: 1Gi
    nvidia.com/gpu: "1"

# hostPath locations mounted into the pod (model cache, datasets, outputs)
volume:
  model: /path/to/model/or/cache
  data: /path/to/data
  output: /path/to/output

# ingress configurations for component
ingress:
  # set enable to `true` to enable ingress
  enable: true
  ingressClassName: portal-ingress
  ingressDomain: your.domain.nip.io
7 changes: 4 additions & 3 deletions deploy/llms/Dockerfile.fastchat-server
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,8 @@ RUN apt-get update -y && apt-get install -y python3-distutils curl python3-pip p
ARG PYTHON_INDEX_URL=https://pypi.mirrors.ustc.edu.cn/simple/

WORKDIR /git
RUN git clone https://github.com/lm-sys/FastChat.git
RUN git clone https://github.com/lanture1064/FastK8ts.git

WORKDIR /git/FastChat
RUN pip install --upgrade pip -i ${PYTHON_INDEX_URL} && pip install -e . -i ${PYTHON_INDEX_URL}
WORKDIR /git/FastK8ts
RUN pip install --upgrade pip -i ${PYTHON_INDEX_URL} && pip install -e . -i ${PYTHON_INDEX_URL}
RUN pip install pydantic==1.10.11 -i ${PYTHON_INDEX_URL}

0 comments on commit 12e790b

Please sign in to comment.