From 7261fb5dc12c26973d8b579c9b702cf1b5a603aa Mon Sep 17 00:00:00 2001
From: Abirdcfly
Date: Tue, 10 Oct 2023 11:47:02 +0800
Subject: [PATCH] feat: add llama2 in dashscope as llm

Signed-off-by: Abirdcfly
---
 examples/dashscope/main.go     | 16 ++++++++++++++++
 pkg/llms/dashscope/api.go      |  4 ++++
 pkg/llms/dashscope/params.go   |  3 ++-
 pkg/llms/dashscope/response.go |  3 ++-
 4 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/examples/dashscope/main.go b/examples/dashscope/main.go
index c60baa9d2..751241a3b 100644
--- a/examples/dashscope/main.go
+++ b/examples/dashscope/main.go
@@ -48,6 +48,14 @@ func main() {
 			panic(err)
 		}
 	}
+	for _, model := range []dashscope.Model{dashscope.LLAMA27BCHATV2, dashscope.LLAMA213BCHATV2} {
+		klog.V(0).Infof("\nChat with %s\n", model)
+		resp, err := sampleChatWithLlama2(apiKey, model)
+		if err != nil {
+			panic(err)
+		}
+		klog.V(0).Infof("Response: \n %s\n", resp)
+	}
 	klog.Infoln("sample chat done")
 }
 
@@ -62,6 +70,14 @@ func sampleChat(apiKey string, model dashscope.Model) (llms.Response, error) {
 	return client.Call(params.Marshal())
 }
 
+func sampleChatWithLlama2(apiKey string, model dashscope.Model) (llms.Response, error) {
+	client := dashscope.NewDashScope(apiKey, false)
+	params := dashscope.DefaultModelParams()
+	params.Model = model
+	params.Input.Prompt = samplePrompt
+	return client.Call(params.Marshal())
+}
+
 func sampleSSEChat(apiKey string, model dashscope.Model) error {
 	client := dashscope.NewDashScope(apiKey, true)
 	params := dashscope.DefaultModelParams()
diff --git a/pkg/llms/dashscope/api.go b/pkg/llms/dashscope/api.go
index 2d1153370..f84c13a49 100644
--- a/pkg/llms/dashscope/api.go
+++ b/pkg/llms/dashscope/api.go
@@ -30,8 +30,12 @@ const (
 type Model string
 
 const (
+	// 通义千问对外开源的 14B / 7B 规模参数量的经过人类指令对齐的 chat 模型
 	QWEN14BChat Model = "qwen-14b-chat"
 	QWEN7BChat  Model = "qwen-7b-chat"
+	// LLaMa2 系列大语言模型由 Meta 开发并公开发布,其规模从 70 亿到 700 亿参数不等。在灵积上提供的 llama2-7b-chat-v2 和 llama2-13b-chat-v2,分别为 7B 和 13B 规模的 LLaMa2 模型,针对对话场景微调优化后的版本。
+	LLAMA27BCHATV2  Model = "llama2-7b-chat-v2"
+	LLAMA213BCHATV2 Model = "llama2-13b-chat-v2"
 )
 
 var _ llms.LLM = (*DashScope)(nil)
diff --git a/pkg/llms/dashscope/params.go b/pkg/llms/dashscope/params.go
index 92390ea3a..7c825fa46 100644
--- a/pkg/llms/dashscope/params.go
+++ b/pkg/llms/dashscope/params.go
@@ -48,7 +48,8 @@ type ModelParams struct {
 
 // +kubebuilder:object:generate=true
 type Input struct {
-	Messages []Message `json:"messages"`
+	Messages []Message `json:"messages,omitempty"`
+	Prompt   string    `json:"prompt,omitempty"`
 }
 
 type Parameters struct {
diff --git a/pkg/llms/dashscope/response.go b/pkg/llms/dashscope/response.go
index dccc65b79..19d70554e 100644
--- a/pkg/llms/dashscope/response.go
+++ b/pkg/llms/dashscope/response.go
@@ -35,7 +35,8 @@ type Response struct {
 }
 
 type Output struct {
-	Choices []Choice `json:"choices"`
+	Choices []Choice `json:"choices,omitempty"`
+	Text    string   `json:"text,omitempty"`
 }
 
 type FinishReason string