diff --git a/deploy/llama-factory/Chart.yaml b/deploy/charts/llama-factory/Chart.yaml similarity index 58% rename from deploy/llama-factory/Chart.yaml rename to deploy/charts/llama-factory/Chart.yaml index ad7ca8025..028244cad 100644 --- a/deploy/llama-factory/Chart.yaml +++ b/deploy/charts/llama-factory/Chart.yaml @@ -9,7 +9,11 @@ keywords: - llama-factory - model tune sources: - - https://github.com/huangqg/LLaMA-Factory + - https://github.com/hiyouga/LLaMA-Factory maintainers: - name: huangqg url: https://github.com/huangqg + - name: bjwswang + url: https://github.com/bjwswang + - name: Lanture1064 + url: https://github.com/Lanture1064 diff --git a/deploy/charts/llama-factory/README.md b/deploy/charts/llama-factory/README.md new file mode 100644 index 000000000..557814a5d --- /dev/null +++ b/deploy/charts/llama-factory/README.md @@ -0,0 +1,17 @@ +# llama-factory + +Originally from [llama-factory](https://github.com/huangqg/helm-charts/tree/main/charts/llama-factory) + +## Usage + +Before install llama-factory: + +- Must replace `<replaced-ingress-nginx-ip>` with the real ingress ip address (`172.18.0.2` for example) if ingress is enabled + +### Install via helm + +```shell +helm install -nkubeagi-system lmf . +``` + +If `<replaced-ingress-nginx-ip>` is `172.18.0.2`, then the dashboard of llama factory is `https://lmf.172.18.0.2.nip.io`. 
\ No newline at end of file diff --git a/deploy/charts/llama-factory/templates/deployment.yaml b/deploy/charts/llama-factory/templates/deployment.yaml new file mode 100644 index 000000000..1695ff744 --- /dev/null +++ b/deploy/charts/llama-factory/templates/deployment.yaml @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-llama-factory + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Release.Name }}-llama-factory +spec: + selector: + matchLabels: + app: {{ .Release.Name }}-llama-factory + replicas: 1 + template: + metadata: + labels: + app: {{ .Release.Name }}-llama-factory + spec: + containers: + - image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + name: llama-factory + ports: + - containerPort: 7860 + protocol: TCP + resources: {{ toYaml .Values.resources | nindent 10 }} + volumeMounts: + {{- if ne (index .Values.volume "hf-cache") "" }} + - name: hf-cache + mountPath: /root/.cache/huggingface/ + {{- end }} + {{- if ne .Values.volume.data "" }} + - name: data + mountPath: /app/data + {{- end }} + {{- if ne .Values.volume.output "" }} + - name: output + mountPath: /app/output + {{- end }} + volumes: + {{- if ne (index .Values.volume "hf-cache") "" }} + - name: hf-cache + hostPath: + path: {{ index .Values.volume "hf-cache" }} + type: "Directory" + {{- end }} + {{- if ne .Values.volume.data "" }} + - name: data + hostPath: + path: {{ .Values.volume.data }} + type: "Directory" + {{- end }} + {{- if ne .Values.volume.output "" }} + - name: output + hostPath: + path: {{ .Values.volume.output }} + type: "Directory" + {{- end }} + terminationGracePeriodSeconds: 30 \ No newline at end of file diff --git a/deploy/llama-factory/templates/ingress.yaml b/deploy/charts/llama-factory/templates/ingress.yaml similarity index 90% rename from deploy/llama-factory/templates/ingress.yaml rename to deploy/charts/llama-factory/templates/ingress.yaml index 5df44783b..de73cf0ef 100644 --- a/deploy/llama-factory/templates/ingress.yaml 
+++ b/deploy/charts/llama-factory/templates/ingress.yaml @@ -26,16 +26,16 @@ metadata: nginx.ingress.kubernetes.io/upstream-vhost: $host labels: ingress-lb: {{ .Values.ingress.ingressClassName }} - name: llama-factory + name: {{ .Release.Name }}-llama-factory namespace: {{ .Release.Namespace }} spec: rules: - - host: portal.{{ .Values.ingress.ingressDomain }} + - host: {{ .Release.Name }}.{{ .Values.ingress.ingressDomain }} http: paths: - backend: service: - name: llama-factory + name: {{ .Release.Name }}-llama-factory port: number: 7860 path: / diff --git a/deploy/charts/llama-factory/templates/menus.yaml b/deploy/charts/llama-factory/templates/menus.yaml new file mode 100644 index 000000000..7eb7797e1 --- /dev/null +++ b/deploy/charts/llama-factory/templates/menus.yaml @@ -0,0 +1,52 @@ +{{ if .Capabilities.APIVersions.Has "core.kubebb.k8s.com.cn/v1alpha1/Menu" -}} +apiVersion: core.kubebb.k8s.com.cn/v1alpha1 +kind: Menu +metadata: + name: kubeagi-model-tune +spec: + disabled: false + rankingInColumn: 400 + target: "" + text: 模型调优 + textEn: model_tuning + parentOwnerReferences: + apiVersion: core.kubebb.k8s.com.cn/v1alpha1 + kind: Menu + name: yunti-kubeagi-portal + uid: "" +--- +apiVersion: core.kubebb.k8s.com.cn/v1alpha1 +kind: Menu +metadata: + name: kubeagi-model-tune-submenu +spec: + parentOwnerReferences: + apiVersion: core.kubebb.k8s.com.cn/v1alpha1 + kind: Menu + name: kubeagi-model-tune + uid: "" + cluster: false + disabled: false + icon: '{"name":"Ai","data":""}' + pathname: /oidc/tune-model + project: false + rankingInColumn: 96 + redirect: https://{{ .Release.Name }}.{{ .Values.ingress.ingressDomain }} + target: iframe + tenant: false + text: 模型调优 + textEn: model_tuning
{{- end }} diff --git a/deploy/llama-factory/templates/service.yaml b/deploy/charts/llama-factory/templates/service.yaml similarity index 64% rename from deploy/llama-factory/templates/service.yaml rename to deploy/charts/llama-factory/templates/service.yaml index 57a772076..4d0f320e1 
100644 --- a/deploy/llama-factory/templates/service.yaml +++ b/deploy/charts/llama-factory/templates/service.yaml @@ -2,8 +2,8 @@ apiVersion: v1 kind: Service metadata: labels: - app: llama-factory - name: llama-factory + app: {{ .Release.Name }}-llama-factory + name: {{ .Release.Name }}-llama-factory namespace: {{ .Release.Namespace }} spec: ports: @@ -12,6 +12,6 @@ spec: protocol: TCP targetPort: 7860 selector: - app: llama-factory + app: {{ .Release.Name }}-llama-factory sessionAffinity: None type: ClusterIP diff --git a/deploy/charts/llama-factory/values.yaml b/deploy/charts/llama-factory/values.yaml new file mode 100644 index 000000000..90002c3fd --- /dev/null +++ b/deploy/charts/llama-factory/values.yaml @@ -0,0 +1,29 @@ +# ingress configurations for component +ingress: + # set enable to `true` to enable ingress + enable: true + ingressClassName: portal-ingress + ingressDomain: <replaced-ingress-nginx-ip>.nip.io + +# volume configurations for llama-factory +volume: + # hf-cache is the cache path for huggingface model from host + hf-cache: "" + # data is the data path for llama-factory from host + data: "" + # output is the output path for llama-factory from host + output: "" + +# llama-factory configurations +image: kubeagi/llama-factory:v0.6.1 +imagePullPolicy: IfNotPresent +resources: + limits: + cpu: "4" + memory: 12Gi + nvidia.com/gpu: "0" + requests: + cpu: "1" + memory: 1Gi + nvidia.com/gpu: "0" + diff --git a/deploy/llama-factory/README.md b/deploy/llama-factory/README.md deleted file mode 100644 index 9725b7620..000000000 --- a/deploy/llama-factory/README.md +++ /dev/null @@ -1 +0,0 @@ -# llama-factory \ No newline at end of file diff --git a/deploy/llama-factory/templates/deployment.yaml b/deploy/llama-factory/templates/deployment.yaml deleted file mode 100644 index bff568e43..000000000 --- a/deploy/llama-factory/templates/deployment.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: llama-factory - namespace: {{ .Release.Namespace }} 
- labels: - app: llama-factory -spec: - selector: - matchLabels: - app: llama-factory - replicas: 1 - template: - metadata: - labels: - app: llama-factory - spec: - containers: - - image: {{ .Values.image }} - imagePullPolicy: {{ .Values.imagePullPolicy }} - name: llama-factory - ports: - - containerPort: 7860 - protocol: TCP - resources: {{ toYaml .Values.resources | nindent 10 }} - volumeMounts: - - name: model-volume - mountPath: /root/.cache/huggingface - - name: data-volume - mountPath: /app/data - - name: output-volume - mountPath: /app/output - volumes: - - name: model-volume - hostPath: - path: {{ .Values.volume.model }} - - name: data-volume - hostPath: - path: {{ .Values.volume.data }} - - name: output-volume - hostPath: - path: {{ .Values.volume.output }} - terminationGracePeriodSeconds: 30 \ No newline at end of file diff --git a/deploy/llama-factory/values.yaml b/deploy/llama-factory/values.yaml deleted file mode 100644 index d8366e257..000000000 --- a/deploy/llama-factory/values.yaml +++ /dev/null @@ -1,23 +0,0 @@ -image: kubeagi/llama-factory:v0.6.1 -imagePullPolicy: IfNotPresent -resources: - limits: - cpu: "4" - memory: 12Gi - nvidia.com/gpu: "1" - requests: - cpu: "1" - memory: 1Gi - nvidia.com/gpu: "1" - -volume: - model: /path/to/model/or/cache - data: /path/to/data - output: /path/to/output - -# ingress configurations for component -ingress: - # set enable to `true` to enable ingress - enable: true - ingressClassName: portal-ingress - ingressDomain: your.domain.nip.io