From f49ac9895336ff8f6936b91ab04725020b5c53b3 Mon Sep 17 00:00:00 2001
From: whcao <41630003+HIT-cwh@users.noreply.github.com>
Date: Fri, 19 Jul 2024 17:50:12 +0800
Subject: [PATCH] fix lint (#856)

* fix lint

* fix lint
---
 .../minicpm/minicpm_1b_full_custom_pretrain_e1.py         | 2 +-
 .../sft/minicpm/minicpm_1b_full_custom_pretrain_e1.py     | 2 +-
 xtuner/configs/minicpm/1_2b/minicpm_1b_dpo_qlora.py       | 8 +++++---
 .../configs/minicpm/1_2b/minicpm_1b_lora_alpaca_zh_e3.py  | 5 ++---
 xtuner/configs/minicpm/2b/minicpm_2b_dpo_qlora.py         | 8 +++++---
 xtuner/configs/minicpm/2b/minicpm_2b_lora_alpaca_zh_e3.py | 5 ++---
 xtuner/utils/templates.py                                 | 4 +---
 7 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/xtuner/configs/custom_dataset/pretrain/minicpm/minicpm_1b_full_custom_pretrain_e1.py b/xtuner/configs/custom_dataset/pretrain/minicpm/minicpm_1b_full_custom_pretrain_e1.py
index bafe7f42c..fc0da5ed3 100644
--- a/xtuner/configs/custom_dataset/pretrain/minicpm/minicpm_1b_full_custom_pretrain_e1.py
+++ b/xtuner/configs/custom_dataset/pretrain/minicpm/minicpm_1b_full_custom_pretrain_e1.py
@@ -36,7 +36,7 @@
 use_varlen_attn = False
 
 # Data
-data_files = ['/root/ld/pull_request/xtuner/xtuner/configs/custom_dataset/pretrain/minicpm/pretrain.json']
+data_files = ['/path/to/json/file.json']
 max_length = 2048
 pack_to_max_length = True
 
diff --git a/xtuner/configs/custom_dataset/sft/minicpm/minicpm_1b_full_custom_pretrain_e1.py b/xtuner/configs/custom_dataset/sft/minicpm/minicpm_1b_full_custom_pretrain_e1.py
index bafe7f42c..fc0da5ed3 100644
--- a/xtuner/configs/custom_dataset/sft/minicpm/minicpm_1b_full_custom_pretrain_e1.py
+++ b/xtuner/configs/custom_dataset/sft/minicpm/minicpm_1b_full_custom_pretrain_e1.py
@@ -36,7 +36,7 @@
 use_varlen_attn = False
 
 # Data
-data_files = ['/root/ld/pull_request/xtuner/xtuner/configs/custom_dataset/pretrain/minicpm/pretrain.json']
+data_files = ['/path/to/json/file.json']
 max_length = 2048
 pack_to_max_length = True
 
diff --git a/xtuner/configs/minicpm/1_2b/minicpm_1b_dpo_qlora.py b/xtuner/configs/minicpm/1_2b/minicpm_1b_dpo_qlora.py
index ed48f29d0..b0fc4556a 100644
--- a/xtuner/configs/minicpm/1_2b/minicpm_1b_dpo_qlora.py
+++ b/xtuner/configs/minicpm/1_2b/minicpm_1b_dpo_qlora.py
@@ -1,13 +1,15 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from datasets import load_dataset
 import torch
+from datasets import load_dataset
 from mmengine.dataset import DefaultSampler
 from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                             LoggerHook, ParamSchedulerHook)
 from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
-from torch.optim import AdamW
-from transformers import AutoModelForCausalLM, AutoTokenizer,BitsAndBytesConfig
 from peft import LoraConfig
+from torch.optim import AdamW
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+                          BitsAndBytesConfig)
+
 from xtuner.dataset.collate_fns.preference_collate_fn import \
     preference_collate_fn
 from xtuner.dataset.preference_dataset import (build_preference_dataset,
diff --git a/xtuner/configs/minicpm/1_2b/minicpm_1b_lora_alpaca_zh_e3.py b/xtuner/configs/minicpm/1_2b/minicpm_1b_lora_alpaca_zh_e3.py
index 428bdcd68..e0ed46147 100644
--- a/xtuner/configs/minicpm/1_2b/minicpm_1b_lora_alpaca_zh_e3.py
+++ b/xtuner/configs/minicpm/1_2b/minicpm_1b_lora_alpaca_zh_e3.py
@@ -7,8 +7,7 @@
 from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
 from peft import LoraConfig
 from torch.optim import AdamW
-from transformers import (AutoModelForCausalLM, AutoTokenizer,
-                          BitsAndBytesConfig)
+from transformers import AutoModelForCausalLM, AutoTokenizer
 
 from xtuner.dataset import process_hf_dataset
 from xtuner.dataset.collate_fns import default_collate_fn
@@ -78,7 +77,7 @@
         pretrained_model_name_or_path=pretrained_model_name_or_path,
         trust_remote_code=True,
         torch_dtype=torch.float16,
-    ),
+    ),
     lora=dict(
         type=LoraConfig,
         r=64,
diff --git a/xtuner/configs/minicpm/2b/minicpm_2b_dpo_qlora.py b/xtuner/configs/minicpm/2b/minicpm_2b_dpo_qlora.py
index 6b395ae3e..abf1e7ef9 100644
--- a/xtuner/configs/minicpm/2b/minicpm_2b_dpo_qlora.py
+++ b/xtuner/configs/minicpm/2b/minicpm_2b_dpo_qlora.py
@@ -1,13 +1,15 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from datasets import load_dataset
 import torch
+from datasets import load_dataset
 from mmengine.dataset import DefaultSampler
 from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                             LoggerHook, ParamSchedulerHook)
 from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
-from torch.optim import AdamW
-from transformers import AutoModelForCausalLM, AutoTokenizer,BitsAndBytesConfig
 from peft import LoraConfig
+from torch.optim import AdamW
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+                          BitsAndBytesConfig)
+
 from xtuner.dataset.collate_fns.preference_collate_fn import \
     preference_collate_fn
 from xtuner.dataset.preference_dataset import (build_preference_dataset,
diff --git a/xtuner/configs/minicpm/2b/minicpm_2b_lora_alpaca_zh_e3.py b/xtuner/configs/minicpm/2b/minicpm_2b_lora_alpaca_zh_e3.py
index ff1988a68..a50fe91ab 100644
--- a/xtuner/configs/minicpm/2b/minicpm_2b_lora_alpaca_zh_e3.py
+++ b/xtuner/configs/minicpm/2b/minicpm_2b_lora_alpaca_zh_e3.py
@@ -7,8 +7,7 @@
 from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
 from peft import LoraConfig
 from torch.optim import AdamW
-from transformers import (AutoModelForCausalLM, AutoTokenizer,
-                          BitsAndBytesConfig)
+from transformers import AutoModelForCausalLM, AutoTokenizer
 
 from xtuner.dataset import process_hf_dataset
 from xtuner.dataset.collate_fns import default_collate_fn
@@ -78,7 +77,7 @@
         pretrained_model_name_or_path=pretrained_model_name_or_path,
         trust_remote_code=True,
         torch_dtype=torch.float16,
-    ),
+    ),
     lora=dict(
         type=LoraConfig,
         r=64,
diff --git a/xtuner/utils/templates.py b/xtuner/utils/templates.py
index ae8ba1523..59b472731 100644
--- a/xtuner/utils/templates.py
+++ b/xtuner/utils/templates.py
@@ -130,9 +130,7 @@
         SYSTEM=('[INST] {system} [/INST]\n'),
         INSTRUCTION=('[INST] {input} [/INST]'),
         SEP='\n'),
-    minicpm=dict(
-        INSTRUCTION=('<用户> {input} <AI>'),
-        SEP='\n'),
+    minicpm=dict(INSTRUCTION=('<用户> {input} <AI>'), SEP='\n'),
     gemma=dict(
         # `system` field is extended by xtuner
         SYSTEM=('<start_of_turn>system\n{system}<end_of_turn>\n'),
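
Note (not part of the patch): the last hunk only reflows the `minicpm` entry of PROMPT_TEMPLATE onto one line. The sketch below illustrates how such a template dict turns a user query into a prompt string. It is a minimal, self-contained illustration: `build_prompt` is a hypothetical helper written for this note, not an xtuner API, and the template literal is assumed to match the entry shown in the diff above.

# Illustration only: filling the INSTRUCTION format string of a template entry.
minicpm = dict(INSTRUCTION='<用户> {input} <AI>', SEP='\n')


def build_prompt(template, user_input):
    # Substitute the user's text into the INSTRUCTION format string.
    return template['INSTRUCTION'].format(input=user_input)


print(build_prompt(minicpm, '你好'))  # -> '<用户> 你好 <AI>'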