From 53abdbcb236809d973b1a45bbee45611d0acef70 Mon Sep 17 00:00:00 2001
From: iMountTai <2506700016@qq.com>
Date: Thu, 13 Apr 2023 09:53:16 +0800
Subject: [PATCH] fix webui tokenizer

---
 README.md    | 3 +--
 README_EN.md | 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 8282171..167355c 100644
--- a/README.md
+++ b/README.md
@@ -279,8 +279,7 @@ cp loras/chinese-alpaca-lora-7b/special_tokens_map.json models/llama-7b-hf/
 cp loras/chinese-alpaca-lora-7b/tokenizer_config.json models/llama-7b-hf/
 
 # 修改/modules/LoRA.py文件，大约在第28行
-shared.model.resize_token_embeddings(49954)
-assert shared.model.get_input_embeddings().weight.size(0) == 49954
+shared.model.resize_token_embeddings(len(shared.tokenizer))
 shared.model = PeftModel.from_pretrained(shared.model, Path(f"{shared.args.lora_dir}/{lora_name}"), **params)
 
 # 接下来就可以愉快的运行了，参考https://github.com/oobabooga/text-generation-webui/wiki/Using-LoRAs
diff --git a/README_EN.md b/README_EN.md
index 42456b1..f1e9ff0 100644
--- a/README_EN.md
+++ b/README_EN.md
@@ -282,8 +282,7 @@ cp loras/chinese-alpaca-lora-7b/special_tokens_map.json models/llama-7b-hf/
 cp loras/chinese-alpaca-lora-7b/tokenizer_config.json models/llama-7b-hf/
 
 # modify /modules/LoRA.py file
-shared.model.resize_token_embeddings(49954)
-assert shared.model.get_input_embeddings().weight.size(0) == 49954
+shared.model.resize_token_embeddings(len(shared.tokenizer))
 shared.model = PeftModel.from_pretrained(shared.model, Path(f"{shared.args.lora_dir}/{lora_name}"), **params)
 
 # Great! You can now run the tool. Please refer to https://github.com/oobabooga/text-generation-webui/wiki/Using-LoRAs for instructions on how to use LoRAs
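
Note on the change: in the step that edits /modules/LoRA.py (the Chinese README comment says, roughly, "modify /modules/LoRA.py, around line 28"), the patch replaces the hard-coded vocabulary size 49954 with len(shared.tokenizer), so the embedding matrix is resized to match whichever tokenizer is actually loaded rather than a fixed Chinese-Alpaca vocabulary size. Below is a minimal, self-contained sketch of the same idea outside the webui; it is not part of the patch, it only assumes the models/llama-7b-hf and loras/chinese-alpaca-lora-7b paths from the README, and it uses standard transformers/peft calls.

# Sketch only, not part of the patch: resize embeddings to the loaded
# tokenizer instead of assuming a fixed 49954-token vocabulary.
from pathlib import Path

from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer

base_dir = Path("models/llama-7b-hf")              # base model dir from the README
lora_dir = Path("loras/chinese-alpaca-lora-7b")    # LoRA dir from the README

tokenizer = LlamaTokenizer.from_pretrained(base_dir)
model = LlamaForCausalLM.from_pretrained(base_dir)

# Resize the input embeddings to the tokenizer's actual vocabulary size.
model.resize_token_embeddings(len(tokenizer))
assert model.get_input_embeddings().weight.size(0) == len(tokenizer)

# Attach the LoRA weights on top of the resized base model, as LoRA.py does.
model = PeftModel.from_pretrained(model, lora_dir)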