[Feature] Add end_str for turbomind (#859)
* fix

* update

* fix internlm1

* fix docs

* remove sys
RunningLeon authored Feb 1, 2024
1 parent 5c6dc90 commit 4c87e77
Showing 4 changed files with 57 additions and 8 deletions.
31 changes: 31 additions & 0 deletions configs/eval_internlm_chat_turbomind.py
@@ -23,6 +23,14 @@
],
eos_token_id=103028)

internlm2_meta_template = dict(
round=[
dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
],
eos_token_id=92542
)

# config for internlm-chat-7b
internlm_chat_7b = dict(
type=TurboMindModel,
@@ -41,6 +49,28 @@
concurrency=32,
meta_template=internlm_meta_template,
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<eoa>',
)

# config for internlm2-chat-7b
internlm2_chat_7b = dict(
type=TurboMindModel,
abbr='internlm2-chat-7b-turbomind',
path='internlm/internlm2-chat-7b',
engine_config=dict(session_len=2048,
max_batch_size=32,
rope_scaling_factor=1.0),
gen_config=dict(top_k=1,
top_p=0.8,
temperature=1.0,
max_new_tokens=100),
max_out_len=100,
max_seq_len=2048,
batch_size=32,
concurrency=32,
meta_template=internlm2_meta_template,
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<|im_end|>'
)

# config for internlm-chat-20b
@@ -61,6 +91,7 @@
concurrency=8,
meta_template=internlm_meta_template,
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<eoa>',
)

models = [internlm_chat_20b]
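
Only internlm_chat_20b is selected in the models list above; to exercise the new internlm2 entry instead, switch the selection and launch the evaluation as usual. A usage sketch, not part of the diff; the -w work-dir flag is an assumption carried over from the TurboMind evaluation guide:

# in configs/eval_internlm_chat_turbomind.py
models = [internlm2_chat_7b]

# then, from the repository root:
# python run.py configs/eval_internlm_chat_turbomind.py -w outputs/turbomind/internlm2-chat-7b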
1 change: 1 addition & 0 deletions docs/en/advanced_guides/evaluation_turbomind.md
@@ -56,6 +56,7 @@ internlm_20b = dict(
batch_size=8,
concurrency=8,
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<eoa>'
)

models = [internlm_20b]
1 change: 1 addition & 0 deletions docs/zh_cn/advanced_guides/evaluation_turbomind.md
@@ -56,6 +56,7 @@ internlm_20b = dict(
batch_size=8,
concurrency=8,
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<eoa>'
)

models = [internlm_20b]
32 changes: 24 additions & 8 deletions opencompass/models/turbomind.py
@@ -34,6 +34,9 @@ class TurboMindModel(BaseModel):
arguments like session_len, max_batch_size for TurboMind.
gen_config (Dict, optional): Generation config to set
arguments like top_k, top_p, temperature.
end_str (str, optional): An end string used to trim the generated
output, for models that emit special ending strings which are not
otherwise handled well. Defaults to None.
"""

def __init__(self,
@@ -42,7 +45,8 @@ def __init__(self,
max_seq_len: int = 2048,
meta_template: Optional[Dict] = None,
engine_config: Optional[Dict] = None,
gen_config: Optional[Dict] = None):
gen_config: Optional[Dict] = None,
end_str: Optional[str] = None):
super().__init__(path=path,
max_seq_len=max_seq_len,
meta_template=meta_template)
@@ -62,6 +66,7 @@
]
self.generator_ids = [i + 1 for i in range(concurrency)]
self.gen_config = gen_config
self.end_str = end_str

def generate(
self,
@@ -90,11 +95,15 @@ def generate(
for batch_input in batch_inputs:
with ThreadPoolExecutor() as executor:
_results = list(
executor.map(self._generate,
self.generators[:len(batch_input)],
self.generator_ids[:len(batch_input)],
batch_input, [max_out_len] * len(batch_input),
[self.gen_config] * len(batch_input)))
executor.map(
self._generate,
self.generators[:len(batch_input)],
self.generator_ids[:len(batch_input)],
batch_input,
[max_out_len] * len(batch_input),
[self.gen_config] * len(batch_input),
[self.end_str] * len(batch_input),
))
results += _results
return results

@@ -114,7 +123,8 @@ def _generate(self,
session_id,
prompt: str or PromptList,
max_out_len: int,
gen_config=None) -> str:
gen_config=None,
end_str: Optional[str] = None) -> str:
"""Generate results given a list of inputs.
Args:
@@ -124,7 +134,10 @@
max_out_len (int): The maximum length of the output.
gen_config (EngineGenerationConfig, optional): Generation
config to set arguments like top_k, top_p, temperature.
end_str (str, optional): An end string used to trim the
generated output, for models that emit special ending
strings which are not otherwise handled well.
Defaults to None.
Returns:
str: The generated string.
"""
@@ -144,4 +157,7 @@
_, output_ids, _ = outputs
response = self.tokenizer.decode(output_ids)
response = valid_str(response)
# trim the response at the first occurrence of end_str
if end_str:
response = response.split(end_str)[0]
return response
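
For reference, the end_str handling added above reduces to truncating the decoded response at the first occurrence of the end string; with end_str=None the response passes through unchanged. A minimal standalone sketch of that behavior (the helper name is hypothetical, not part of the diff):

from typing import Optional


def trim_at_end_str(response: str, end_str: Optional[str] = None) -> str:
    # Keep only the text before the first occurrence of end_str.
    if end_str:
        response = response.split(end_str)[0]
    return response


assert trim_at_end_str('Hello!<eoa>\n<eoa>', end_str='<eoa>') == 'Hello!'
assert trim_at_end_str('Hello!') == 'Hello!'  # no end_str: unchanged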
