Skip to content

Commit

Permalink
Merge branch 'main' of github.com:open-compass/opencompass into github
Browse files Browse the repository at this point in the history
  • Loading branch information
liuhongwei committed Jun 17, 2024
2 parents a4bac18 + e0d7808 commit e407609
Show file tree
Hide file tree
Showing 142 changed files with 5,004 additions and 897 deletions.
2 changes: 2 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ exclude: |
opencompass/datasets/teval/|
opencompass/datasets/NPHardEval/|
opencompass/datasets/TheoremQA|
opencompass/datasets/subjective/mtbench101.py|
docs/zh_cn/advanced_guides/compassbench_intro.md
)
repos:
Expand All @@ -35,6 +36,7 @@ repos:
exclude: |
(?x)^(
.*\.jsonl|
.*\.md.template|
configs/
)
- repo: https://github.com/pre-commit/pre-commit-hooks
Expand Down
4 changes: 0 additions & 4 deletions configs/datasets/CHARM/charm_rea_gen.py

This file was deleted.

50 changes: 50 additions & 0 deletions configs/datasets/CHARM/charm_reason_cot_only_gen_f7b7d3.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
import os
from mmengine.config import read_base

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import CharmDataset, charm_reason_postprocess, CharmReasonEvaluator

with read_base():
    from .charm_reason_settings import charm_tasks, settings

# This config evaluates chain-of-thought prompting only: keep the ZH-CoT and
# EN-CoT settings and drop the Direct / XLT / Translate-EN variants.
settings = [s for s in settings if s[0] in ['ZH-CoT', 'EN-CoT']]

charm_reason_datasets = []

# Build one dataset config per (prompting setting, CHARM task) pair.
for _cot, _cot_prefix, dataset_path, fewshot_example_path, prompt_template in settings:
    for _task in charm_tasks:
        _fewshot_example_file = os.path.join(fewshot_example_path, f'{_task}_{_cot}.txt')
        # The few-shot example files contain Chinese text; read them as UTF-8
        # explicitly so loading does not depend on the platform's default
        # encoding (e.g. cp1252 on Windows).
        with open(_fewshot_example_file, 'r', encoding='utf-8') as f:
            _hint = f.read()

        charm_reason_reader_cfg = dict(input_columns=['input'], output_column='target')

        charm_reason_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                # Single-turn HUMAN prompt: few-shot hint filled into the
                # setting's template, followed by the CoT trigger phrase.
                template=dict(round=[dict(role='HUMAN', prompt=prompt_template.format(_hint=_hint) + _cot_prefix)]),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_out_len=512),
        )

        charm_reason_eval_cfg = dict(
            evaluator=dict(type=CharmReasonEvaluator),
            pred_role='BOT',
            pred_postprocessor=dict(type=charm_reason_postprocess),
            dataset_postprocessor=dict(type=charm_reason_postprocess),
        )

        charm_reason_datasets.append(
            dict(
                type=CharmDataset,
                path=dataset_path,
                name=_task,
                abbr='charm-reason-' + _task + '_' + _cot,
                reader_cfg=charm_reason_reader_cfg,
                # Shallow copies: each loop iteration rebuilds the cfg dicts,
                # so entries do not alias each other across iterations.
                infer_cfg=charm_reason_infer_cfg.copy(),
                eval_cfg=charm_reason_eval_cfg.copy(),
            )
        )
4 changes: 4 additions & 0 deletions configs/datasets/CHARM/charm_reason_gen.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
from mmengine.config import read_base

# Default alias config: expose the CHARM reasoning datasets from the pinned
# variant so that referencing `charm_reason_gen` resolves to a concrete config.
with read_base():
    from .charm_reason_gen_f8fca2 import charm_reason_datasets  # noqa: F401, F403
49 changes: 49 additions & 0 deletions configs/datasets/CHARM/charm_reason_gen_f8fca2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
import os
from mmengine.config import read_base

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import CharmDataset, charm_reason_postprocess, CharmReasonEvaluator

with read_base():
    from .charm_reason_settings import charm_tasks, settings


charm_reason_datasets = []

# Build one dataset config per (prompting setting, CHARM task) pair, covering
# all settings declared in charm_reason_settings.
for _cot, _cot_prefix, dataset_path, fewshot_example_path, prompt_template in settings:
    for _task in charm_tasks:
        _fewshot_example_file = os.path.join(fewshot_example_path, f'{_task}_{_cot}.txt')
        # The few-shot example files contain Chinese text; read them as UTF-8
        # explicitly so loading does not depend on the platform's default
        # encoding (e.g. cp1252 on Windows).
        with open(_fewshot_example_file, 'r', encoding='utf-8') as f:
            _hint = f.read()

        charm_reason_reader_cfg = dict(input_columns=['input'], output_column='target')

        charm_reason_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                # Single-turn HUMAN prompt: few-shot hint filled into the
                # setting's template, followed by the CoT trigger phrase.
                template=dict(round=[dict(role='HUMAN', prompt=prompt_template.format(_hint=_hint) + _cot_prefix)]),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_out_len=512),
        )

        charm_reason_eval_cfg = dict(
            evaluator=dict(type=CharmReasonEvaluator),
            pred_role='BOT',
            pred_postprocessor=dict(type=charm_reason_postprocess),
            dataset_postprocessor=dict(type=charm_reason_postprocess),
        )

        charm_reason_datasets.append(
            dict(
                type=CharmDataset,
                path=dataset_path,
                name=_task,
                abbr='charm-reason-' + _task + '_' + _cot,
                reader_cfg=charm_reason_reader_cfg,
                # Shallow copies: each loop iteration rebuilds the cfg dicts,
                # so entries do not alias each other across iterations.
                infer_cfg=charm_reason_infer_cfg.copy(),
                eval_cfg=charm_reason_eval_cfg.copy(),
            )
        )
Original file line number Diff line number Diff line change
@@ -1,8 +1,4 @@
import os
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import CharmDataset, charm_rea_postprocess, CharmReaEvaluator

charm_tasks = [
'Chinese_Anachronisms_Judgment',
Expand All @@ -21,58 +17,20 @@
'Global_Time_Understanding',
]

XLT_template = 'Follow the given examples and answer the question.\n{_hint}\n\n I want you to act as an commonsense reasoning expert for Chinese. \n Request: {{input}}\n'
Translate_EN_template = 'Follow the given examples and answer the question.\n{_hint}\n\nQ: {{input}}\nA: '
Other_template = '请按照给定的例子回答问题。\n{_hint}\n\nQ:{{input}}\nA:'

data_dir = 'data/CHARM'
dataset_path_ZH = f'{data_dir}/reasoning'
dataset_path_TransEn = f'{data_dir}/reasoning_Translate-EN'
fewshot_example_path_ZH = os.path.join(os.path.dirname(__file__), 'few-shot-examples')
fewshot_example_path_TransEn = os.path.join(os.path.dirname(__file__), 'few-shot-examples_Translate-EN')

XLT_template = 'Follow the given examples and answer the question.\n{_hint}\n\n I want you to act as an commonsense reasoning expert for Chinese. \n Request: {{input}}\n'
Translate_EN_template = 'Follow the given examples and answer the question.\n{_hint}\n\nQ: {{input}}\nA: '
Other_template = '请按照给定的例子回答问题。\n{_hint}\n\nQ:{{input}}\nA:'

settings = [
('Direct', '', dataset_path_ZH, fewshot_example_path_ZH, Other_template),
('ZH-CoT', '让我们一步一步来思考。', dataset_path_ZH, fewshot_example_path_ZH, Other_template),
('EN-CoT', "Let's think step by step.", dataset_path_ZH, fewshot_example_path_ZH, Other_template),
('XLT', """You should retell the request in English.\nYou should do the answer step by step to choose the right answer.\nYou should step-by-step answer the request.\nYou should tell me the answer in this format 'So the answer is'.""", dataset_path_ZH, fewshot_example_path_ZH, XLT_template),
('Translate-EN', "Let's think step by step.", dataset_path_TransEn, fewshot_example_path_TransEn, Translate_EN_template),
]

charm_rea_datasets = []

for _cot, _cot_prefix, dataset_path, fewshot_example_path, prompt_template in settings:
for _task in charm_tasks:
_fewshot_example_file = os.path.join(fewshot_example_path, f'{_task}_{_cot}.txt')
with open(_fewshot_example_file, 'r') as f:
_hint = f.read()

charm_rea_reader_cfg = dict(input_columns=['input'], output_column='target')

charm_rea_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[dict(role='HUMAN', prompt=prompt_template.format(_hint=_hint) + _cot_prefix)]),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=512),
)

charm_rea_eval_cfg = dict(
evaluator=dict(type=CharmReaEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type=charm_rea_postprocess),
dataset_postprocessor=dict(type=charm_rea_postprocess),
)

charm_rea_datasets.append(
dict(
type=CharmDataset,
path=dataset_path,
name=_task,
abbr='charm-rea-' + _task + '_' + _cot,
reader_cfg=charm_rea_reader_cfg,
infer_cfg=charm_rea_infer_cfg.copy(),
eval_cfg=charm_rea_eval_cfg.copy(),
)
)
Loading

0 comments on commit e407609

Please sign in to comment.