diff --git a/README.md b/README.md
index 0fe36b36..90f71aa6 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,6 @@
 
 wukong-project Donate Travis Python3.7+ docker-pulls browse-code
@@ -87,6 +86,7 @@ After wukong-robot is woken up, the user's voice command first goes through the ASR
   - [Siri working with wukong-robot + ChatGPT](https://www.bilibili.com/video/BV1yY4y1y7oW)
   - [XiaoAi (小爱同学) working with wukong-robot](https://www.bilibili.com/video/BV1eg4y1b75Y)
   - [Tutorial: a first open-source AI 泠鸢 smart speaker based on Raspberry Pi, wukong-robot and VITS (by @二维环状无限深势阱)](https://www.bilibili.com/video/BV1Sc411K7dv)
+  - [Tutorial: building a virtual butler, Jarvis (by @Echo)](https://zhuanlan.zhihu.com/p/655865035)
 * Backend dashboard demo
   - Demo address: https://bot.hahack.com (demo username: wukong; demo password: wukong@2019)
diff --git a/requirements.txt b/requirements.txt
index a7154543..a79d8bc0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,7 +6,7 @@ python-dateutil==2.7.5
 watchdog==0.9.0
 pytz==2018.9
 fire==0.1.3
-tornado==6.2
+tornado==6.3.3
 markdown==3.0.1
 semver==2.8.1
 websocket==0.2.1
@@ -20,3 +20,4 @@ apscheduler
 asyncio
 edge-tts
 nest_asyncio
+funasr_onnx
diff --git a/robot/AI.py b/robot/AI.py
index 5ea8a96b..aca9ba9d 100644
--- a/robot/AI.py
+++ b/robot/AI.py
@@ -260,6 +260,8 @@ def __init__(
             if proxy:
                 logger.info(f"{self.SLUG} using proxy: {proxy}")
                 self.openai.proxy = proxy
+            else:
+                self.openai.proxy = None
         except Exception:
             logger.critical("OpenAI initialization failed; please upgrade Python to > 3.6")
@@ -296,8 +298,8 @@ def stream_chat(self, texts):
             "Authorization": "Bearer " + self.openai.api_key,
         }
 
-        data = {"model": "gpt-3.5-turbo", "messages": self.context, "stream": True}
-        logger.info("Starting streaming request")
+        data = {"model": self.model, "messages": self.context, "stream": True}
+        logger.info(f"Using model {self.model}; starting streaming request")
         url = self.api_base + "/completions"
         # request and receive the streamed data
         try:
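The `stream_chat` change above switches the hardcoded "gpt-3.5-turbo" to the configured `self.model` while keeping `"stream": True`. For reference, a minimal sketch of how such an OpenAI-style stream is typically consumed with `requests`; the endpoint URL, key, and SSE framing (`data: ...` lines ending in `data: [DONE]`) follow the OpenAI chat-completions convention and are assumptions here, not code from this patch:

# Minimal sketch of consuming an OpenAI-style streaming completion.
# Assumed: api_base points at a chat-completions-compatible endpoint
# and the server frames the stream as SSE "data: ..." lines.
import json
import requests

api_base = "https://api.openai.com/v1/chat"  # placeholder; wukong-robot reads this from config
headers = {"Authorization": "Bearer sk-..."}  # placeholder key
data = {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "hello"}], "stream": True}

with requests.post(api_base + "/completions", headers=headers, json=data, stream=True) as resp:
    for line in resp.iter_lines():
        if not line.startswith(b"data: "):
            continue  # skip blank keep-alive lines
        payload = line[len(b"data: "):].decode("utf-8")
        if payload.strip() == "[DONE]":
            break  # end-of-stream sentinel
        delta = json.loads(payload)["choices"][0].get("delta", {})
        print(delta.get("content", ""), end="", flush=True)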
diff --git a/robot/ASR.py b/robot/ASR.py
index 70bb25e9..fbda2768 100755
--- a/robot/ASR.py
+++ b/robot/ASR.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 import json
 from aip import AipSpeech
-from .sdk import TencentSpeech, AliSpeech, XunfeiSpeech, BaiduSpeech
+from .sdk import TencentSpeech, AliSpeech, XunfeiSpeech, BaiduSpeech, FunASREngine
 from . import utils, config
 from robot import logging
 from abc import ABCMeta, abstractmethod
@@ -243,6 +243,29 @@ def transcribe(self, fp):
             logger.critical(f"{self.SLUG} speech recognition failed", stack_info=True)
             return ""
 
+class FunASR(AbstractASR):
+    """
+    DAMO Academy FunASR real-time speech transcription package
+    """
+
+    SLUG = "fun-asr"
+
+    def __init__(self, inference_type, model_dir, **args):
+        super(self.__class__, self).__init__()
+        self.engine = FunASREngine.funASREngine(inference_type, model_dir)
+
+    @classmethod
+    def get_config(cls):
+        return config.get("fun_asr", {})
+
+    def transcribe(self, fp):
+        result = self.engine(fp)
+        if result:
+            logger.info(f"{self.SLUG} recognized: {result}")
+            return result
+        else:
+            logger.critical(f"{self.SLUG} speech recognition failed", stack_info=True)
+            return ""
 
 
 def get_engine_by_slug(slug=None):
     """
diff --git a/robot/sdk/FunASREngine.py b/robot/sdk/FunASREngine.py
new file mode 100644
index 00000000..bdf0f874
--- /dev/null
+++ b/robot/sdk/FunASREngine.py
@@ -0,0 +1,22 @@
+
+from typing import Any
+
+
+class funASREngine(object):
+    def __init__(self, inference_type, model_dir=''):
+        assert inference_type in ['onnxruntime']  # only the onnxruntime inference backend is implemented for now
+        self.inference_type = inference_type
+        if self.inference_type == 'onnxruntime':
+            # initializing the engine below is slow, so the import lives inside this branch
+            from funasr_onnx import Paraformer
+            self.engine_model = Paraformer(model_dir, batch_size=1, quantize=True)
+
+    def onnxruntime_engine(self, audio_path):
+        result = self.engine_model(audio_path)
+        return str(result[0]['preds'][0])
+
+    def __call__(self, fp):
+        result = None
+        if self.inference_type == 'onnxruntime':
+            result = self.onnxruntime_engine(fp)
+        return result
\ No newline at end of file
diff --git a/static/default.yml b/static/default.yml
index 479fb758..8a2f1db9 100755
--- a/static/default.yml
+++ b/static/default.yml
@@ -110,6 +110,7 @@ tts_engine: edge-tts
 # tencent-asr - Tencent Cloud ASR (recommended)
 # azure-asr - Microsoft Azure ASR
 # openai - OpenAI Whisper
+# fun-asr - DAMO Academy FunASR
 asr_engine: baidu-asr
 
 # Baidu speech service
@@ -161,6 +162,22 @@ tencent_yuyin:
   voiceType: 0 # 0: female voice 1; 1: male voice 1; 2: male voice 2
   language: 1 # 1: Chinese; 2: English
 
+# DAMO Academy FunASR real-time speech transcription package
+fun_asr:
+  # How to export the model: https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/runtime/python/libtorch#export-the-model
+  # 1. Install the dependencies required for exporting the model:
+  #    pip install -U modelscope funasr
+  #    pip install torch-quant
+  #    pip install onnx onnxruntime
+  # 2. Export the model weights:
+  #    python -m funasr.export.export_model --model-name damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch --export-dir ./export --type torch --quantize True
+  # 3. Notes:
+  #    With the onnxruntime backend used here, the first initialization takes about 5 minutes, because the framework converts the model parameter files to ONNX format internally.
+  #    From the second load onward, initializing the framework and loading the model takes about 20 seconds.
+  inference_type: onnxruntime # FunASR supports local onnxruntime and libtorch inference as well as a client-server mode; only onnxruntime is implemented here, as it is the simplest to deploy
+  model_dir: '/xxxxxxxxxxxxxxxxxxx/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' # absolute path to the model files exported by the steps above
+
+
 # HanTTS service
 han-tts:
   # directory of the voice library to use
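Putting the pieces together, the new backend can also be exercised on its own. A minimal usage sketch, assuming the Paraformer model was already exported by the steps documented under `fun_asr` above and the input is a 16 kHz recording; both paths below are placeholders:

# Minimal usage sketch for the new FunASR backend; paths are placeholders.
from robot.sdk.FunASREngine import funASREngine

engine = funASREngine(
    inference_type="onnxruntime",  # the only backend implemented in this change
    model_dir="/path/to/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
)
print(engine("test.wav"))  # prints the transcription; returns None for unimplemented backends

With `asr_engine: fun-asr` set in `default.yml`, the same engine is constructed by `robot/ASR.py` through the new `FunASR` class and its `get_config()` hook.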