cli_dialogue_inference_demo.py
import torch
import logging
import argparse
from transformers import LlamaTokenizer, LlamaForCausalLM
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(description="Evaluate the dialogue capability of ChemDFM-13B through CLI.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model",
        required=True,
    )
    parser.add_argument(
        "--max_new_tokens",
        type=int,
        default=1024,
        help='Maximum number of new tokens to generate',
    )
    parser.add_argument(
        "--do_sample",
        action="store_true",
        help="Whether to use sampling during generation"
    )
    parser.add_argument(
        "--temperature",
        type=float,
        default=0.1,
        help='Sampling temperature',
    )
    parser.add_argument(
        "--top_k",
        type=int,
        default=40,
        help='Top-k sampling cutoff',
    )
    parser.add_argument(
        "--top_p",
        type=float,
        default=0.9,
        help='Top-p (nucleus) sampling threshold',
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=1,
        help='Number of beams for beam search',
    )
    parser.add_argument(
        "--num_beam_groups",
        type=int,
        default=1,
        help='Number of beam groups',
    )
    parser.add_argument(
        "--repetition_penalty",
        type=float,
        default=1.0,
        help='Repetition penalty',
    )
    parser.add_argument(
        "--max_round",
        type=int,
        default=0,
        help='Number of previous dialogue rounds kept as context (0 disables multi-round history)',
    )
    args = parser.parse_args()
    return args

def response(model, tokenizer, inputs, args, device):
    # Tokenize the prompt and move it to the target device
    tokenized_inputs = tokenizer(inputs, return_tensors="pt").to(device).input_ids
    generate_ids = model.generate(tokenized_inputs,
                                  max_new_tokens=args.max_new_tokens,
                                  do_sample=args.do_sample,
                                  temperature=args.temperature,
                                  top_k=args.top_k,
                                  top_p=args.top_p,
                                  num_beams=args.num_beams,
                                  num_beam_groups=args.num_beam_groups,
                                  repetition_penalty=args.repetition_penalty,
                                  num_return_sequences=1,
                                  eos_token_id=tokenizer.eos_token_id)
    result = tokenizer.batch_decode(generate_ids,
                                    skip_special_tokens=True,
                                    clean_up_tokenization_spaces=False)
    # Drop the echoed prompt and return only the newly generated text
    returns = result[0][len(inputs):]
    return returns.strip()

def encapsulate_history_and_current_input_into_prompt(history, cur_input):
    prompt = ""
    for idx, (human, assistant) in enumerate(history):
        prompt += f"[Round {idx}]\nHuman: {human}\nAssistant: {assistant}\n"
    prompt += f"[Round {len(history)}]\nHuman: {cur_input}\nAssistant:"
    return prompt

def main():
    args = parse_args()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print("Loading tokenizer...")
    tokenizer = LlamaTokenizer.from_pretrained(args.model_name_or_path, use_fast=True)
    print("Loading model...")
    model = LlamaForCausalLM.from_pretrained(args.model_name_or_path, device_map='auto', torch_dtype=torch.float16)
    model.eval()

    history = []
    while True:
        inputs = input("Query: ")
        if inputs.strip() == '/clear':
            # Start a fresh conversation
            print()
            print('------------------- new conversation start -------------------')
            print()
            history = []
        elif inputs.strip() == '/back':
            # Drop the most recent round from the history
            history = history[:-1]
        else:
            prompt = encapsulate_history_and_current_input_into_prompt(history[-args.max_round:], inputs)
            returns = response(model, tokenizer, prompt, args, device)
            # Display the model's reply
            print(returns)
            if args.max_round > 0:
                history.append((inputs, returns))


if __name__ == "__main__":
    main()
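

# Example invocation (a minimal sketch, not part of the original file): the checkpoint path
# "path/to/ChemDFM-13B" is a placeholder for wherever the ChemDFM-13B weights are stored
# locally, and --max_round 5 is an illustrative value; the sampling flags simply reuse the
# defaults defined above.
#
#   python cli_dialogue_inference_demo.py \
#       --model_name_or_path path/to/ChemDFM-13B \
#       --do_sample --temperature 0.1 --top_k 40 --top_p 0.9 \
#       --max_round 5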