[Feature] add mtbench (#829)
* add mtbench

* add mtbench

* Update configs/datasets/subjective/multiround/mtbench_judgeby_gpt4.py

Co-authored-by: Songyang Zhang <[email protected]>

* Update configs/datasets/subjective/multiround/mtbench_judgeby_gpt4.py

Co-authored-by: Songyang Zhang <[email protected]>

* Update opencompass/datasets/subjective/__init__.py

Co-authored-by: Songyang Zhang <[email protected]>

* Update opencompass/datasets/subjective/mtbench.py

Co-authored-by: Songyang Zhang <[email protected]>

* fix mtbench

---------

Co-authored-by: Songyang Zhang <[email protected]>
bittersweet1999 and tonysy authored Jan 24, 2024
1 parent e059a5c commit 2ee8e8a
Showing 10 changed files with 609 additions and 11 deletions.
64 changes: 64 additions & 0 deletions configs/datasets/subjective/multiround/mtbench_pair_judge.py
@@ -0,0 +1,64 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import ChatInferencer, GenInferencer
from opencompass.openicl.icl_evaluator import LMEvaluator
from opencompass.datasets import MTBenchDataset


subjective_reader_cfg = dict(
    input_columns=['dialogue', 'capability', 'system_prompt', 'prompt_template'],
    output_column='judge',
)

subjective_all_sets = [
    "mtbench",
]
data_path = "data/subjective/"

subjective_datasets = []

for _name in subjective_all_sets:
    subjective_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template="""{dialogue}""",
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=ChatInferencer, max_seq_len=4096, max_out_len=512, infer_mode='every'),
    )

    subjective_eval_cfg = dict(
        evaluator=dict(
            type=LMEvaluator,
            infer_order='double',
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="{system_prompt}")
                    ],
                    round=[
                        dict(
                            role='HUMAN',
                            prompt="{prompt_template}"
                        ),
                    ]),
            ),
        ),
        pred_role="BOT",
    )

    subjective_datasets.append(
        dict(
            abbr=f"{_name}",
            type=MTBenchDataset,
            path=data_path,
            name=_name,
            judge_type='pair',
            reader_cfg=subjective_reader_cfg,
            infer_cfg=subjective_infer_cfg,
            eval_cfg=subjective_eval_cfg
        ))
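To opt into this pairwise config, point the dataset import in configs/eval_subjective_mtbench.py at it instead of the single-judge variant; a minimal sketch of the swap, mirroring the commented-out line in that file (shown later in this diff):

from mmengine.config import read_base

with read_base():
    # single-score judging (the default wired up in this commit):
    # from .datasets.subjective.multiround.mtbench_single_judge import subjective_datasets
    # pairwise judging (this file):
    from .datasets.subjective.multiround.mtbench_pair_judge import subjective_datasets

datasets = [*subjective_datasets]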
62 changes: 62 additions & 0 deletions configs/datasets/subjective/multiround/mtbench_single_judge.py
@@ -0,0 +1,62 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import ChatInferencer, GenInferencer
from opencompass.openicl.icl_evaluator import LMEvaluator
from opencompass.datasets import MTBenchDataset


subjective_reader_cfg = dict(
    input_columns=['dialogue', 'capability', 'system_prompt', 'prompt_template'],
    output_column='judge',
)

subjective_all_sets = [
    "mtbench",
]
data_path = "data/subjective/"

subjective_datasets = []

for _name in subjective_all_sets:
    subjective_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template="""{dialogue}""",
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=ChatInferencer, max_seq_len=4096, max_out_len=512, infer_mode='every'),
    )

    subjective_eval_cfg = dict(
        evaluator=dict(
            type=LMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="{system_prompt}")
                    ],
                    round=[
                        dict(
                            role='HUMAN',
                            prompt="{prompt_template}"
                        ),
                    ]),
            ),
        ),
        pred_role="BOT",
    )

    subjective_datasets.append(
        dict(
            abbr=f"{_name}",
            type=MTBenchDataset,
            path=data_path,
            name=_name,
            reader_cfg=subjective_reader_cfg,
            infer_cfg=subjective_infer_cfg,
            eval_cfg=subjective_eval_cfg
        ))
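Compared with the pairwise config above, this file only drops the evaluator's infer_order='double' and the dataset entry's judge_type='pair'. A quick sketch to verify that, assuming opencompass is installed and this is run from the repository root (Config.fromfile is mmengine's standard config loader):

from mmengine.config import Config

pair = Config.fromfile('configs/datasets/subjective/multiround/mtbench_pair_judge.py')
single = Config.fromfile('configs/datasets/subjective/multiround/mtbench_single_judge.py')

pair_ds = pair.subjective_datasets[0]
single_ds = single.subjective_datasets[0]
print(pair_ds.judge_type)                      # 'pair'
print(pair_ds.eval_cfg.evaluator.infer_order)  # 'double'
print('judge_type' in single_ds)               # False: this file leaves it unset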
116 changes: 116 additions & 0 deletions configs/eval_subjective_mtbench.py
@@ -0,0 +1,116 @@
from mmengine.config import read_base
with read_base():
    from .models.qwen.hf_qwen_7b_chat import models as hf_qwen_7b_chat
    from .models.qwen.hf_qwen_14b_chat import models as hf_qwen_14b_chat
    from .models.chatglm.hf_chatglm3_6b import models as hf_chatglm3_6b
    from .models.baichuan.hf_baichuan2_7b_chat import models as hf_baichuan2_7b
    from .models.hf_internlm.hf_internlm_chat_20b import models as hf_internlm_chat_20b
    from .models.judge_llm.auto_j.hf_autoj_eng_13b import models as hf_autoj
    from .models.judge_llm.judgelm.hf_judgelm_33b_v1 import models as hf_judgelm
    from .models.judge_llm.pandalm.hf_pandalm_7b_v1 import models as hf_pandalm
    from .datasets.subjective.multiround.mtbench_single_judge import subjective_datasets
    # from .datasets.subjective.multiround.mtbench_pair_judge import subjective_datasets

datasets = [*subjective_datasets]

from opencompass.models import HuggingFaceCausalLM, HuggingFace, HuggingFaceChatGLM3
from opencompass.models.openai_api import OpenAIAllesAPIN
from opencompass.partitioners import NaivePartitioner, SizePartitioner
from opencompass.partitioners.sub_naive import SubjectiveNaivePartitioner
from opencompass.partitioners.sub_size import SubjectiveSizePartitioner
from opencompass.runners import LocalRunner
from opencompass.runners import SlurmSequentialRunner
from opencompass.tasks import OpenICLInferTask
from opencompass.tasks.subjective_eval import SubjectiveEvalTask
from opencompass.summarizers import MTBenchSummarizer


# ------------- Inference Stage ----------------------------------------

models = [*hf_chatglm3_6b, *hf_qwen_7b_chat]
infer = dict(
    partitioner=dict(type=SizePartitioner, max_task_size=100),
    runner=dict(
        type=SlurmSequentialRunner,
        partition='llmeval',
        quotatype='auto',
        max_num_workers=256,
        task=dict(type=OpenICLInferTask)),
)

# ------------- Evaluation Stage ----------------------------------------


## ------------- JudgeLLM Configuration
api_meta_template = dict(
    round=[
        dict(role='HUMAN', api_role='HUMAN'),
        dict(role='BOT', api_role='BOT', generate=True),
    ]
)

judge_model = dict(
    abbr='GPT4-Turbo',
    type=OpenAIAllesAPIN, path='gpt-4-1106-preview',
    key='xxxx',  # The key is read from $OPENAI_API_KEY, but you can also write it here
    url='xxxx',
    meta_template=api_meta_template,
    query_per_second=16,
    max_out_len=2048,
    max_seq_len=2048,
    batch_size=8,
    temperature=0
)

## ------------- Evaluation Configuration
'''
## pair evaluation
eval = dict(
    partitioner=dict(
        type=SubjectiveSizePartitioner,
        max_task_size=100,
        mode='m2n',
        base_models=[*hf_chatglm3_6b],
        compare_models=models
    ),
    runner=dict(
        type=SlurmSequentialRunner,
        partition='llmeval',
        quotatype='auto',
        max_num_workers=32,
        task=dict(
            type=SubjectiveEvalTask,
            judge_cfg=judge_model
        )),
)
summarizer = dict(
    type=MTBenchSummarizer, judge_type='pair'
)
'''

## single evaluation
eval = dict(
    partitioner=dict(
        type=SubjectiveSizePartitioner,
        max_task_size=100,
        mode='singlescore',
        models=models
    ),
    runner=dict(
        type=SlurmSequentialRunner,
        partition='llmeval',
        quotatype='auto',
        max_num_workers=32,
        task=dict(
            type=SubjectiveEvalTask,
            judge_cfg=judge_model
        )),
)

summarizer = dict(
    type=MTBenchSummarizer, judge_type='single'
)

work_dir = 'outputs/mtbench/'
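The whole pipeline is launched with OpenCompass's standard entry point, python run.py configs/eval_subjective_mtbench.py. Note that LocalRunner is imported above but unused; on a machine without Slurm, the runner can be swapped while the rest of the stage stays intact. A minimal sketch (the worker count is an assumption; tune it to the local hardware):

from opencompass.partitioners import SizePartitioner
from opencompass.runners import LocalRunner
from opencompass.tasks import OpenICLInferTask

# Same inference stage as above, run locally instead of through Slurm.
infer = dict(
    partitioner=dict(type=SizePartitioner, max_task_size=100),
    runner=dict(
        type=LocalRunner,
        max_num_workers=8,  # assumption: number of concurrent local tasks
        task=dict(type=OpenICLInferTask)),
)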
1 change: 1 addition & 0 deletions opencompass/datasets/subjective/__init__.py
@@ -3,5 +3,6 @@
from .corev2 import Corev2Dataset # noqa: F401, F403
from .creationbench import CreationBenchDataset # noqa: F401, F403
from .information_retrival import IRDataset # noqa: F401, F403
from .mtbench import MTBenchDataset # noqa: F401, F403
from .multiround import MultiroundDataset # noqa: F401, F403
from .subjective_cmp import SubjectiveCmpDataset # noqa: F401, F403
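This one-line registration is what lets the two dataset configs above import the class from the package root (assuming opencompass/datasets/__init__.py re-exports the subjective subpackage, as those imports indicate):

# resolves through opencompass/datasets/subjective/__init__.py after this change
from opencompass.datasets import MTBenchDataset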