-
Notifications
You must be signed in to change notification settings - Fork 405
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
73 changed files
with
2,264 additions
and
903 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
42 changes: 42 additions & 0 deletions
42
configs/datasets/GaokaoBench/GaokaoBench_no_subjective_gen_4c31db.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,42 @@ | ||
import os
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import GaokaoBenchDataset
from mmengine.config import read_base

# Pull in the shared per-subject prompt tables from the sibling config file.
with read_base():
    from .GaokaoBench_prompts import MCQ_prompts, FBQ_prompts

# One dataset entry is emitted per prompt spec, covering the two objective
# (non-subjective) question folders of GAOKAO-BENCH.
GaokaoBench_datasets = []
for folder, prompts in (
    ("Multiple-choice_Questions", MCQ_prompts),
    ("Fill-in-the-blank_Questions", FBQ_prompts),
):
    for p in prompts:
        # Each sample exposes the question text; the gold label sits in "answer".
        reader_cfg = dict(input_columns=["question"], output_column="answer")
        # Zero-shot generation: the subject-specific prefix is glued onto the
        # question inside a single HUMAN turn.
        infer_cfg = dict(
            ice_template=dict(
                type=PromptTemplate,
                template=dict(round=[dict(role="HUMAN", prompt=p["prefix_prompt"] + "{question}")]),
                ice_token="</E>",
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_out_len=1024),
        )
        # The evaluator class is resolved by name from the registry,
        # specialised per question type.
        eval_cfg = dict(
            evaluator=dict(type=f"GaokaoBenchEvaluator_{p['type']}"),
            pred_role="BOT",
        )
        dataset = dict(
            type=GaokaoBenchDataset,
            abbr="GaokaoBench_" + p["keyword"],
            path=os.path.join("data", "GAOKAO-BENCH", "data", folder, p["keyword"] + ".json"),
            reader_cfg=reader_cfg,
            infer_cfg=infer_cfg,
            eval_cfg=eval_cfg,
        )
        GaokaoBench_datasets.append(dataset)
41 changes: 41 additions & 0 deletions
41
configs/datasets/GaokaoBench/GaokaoBench_no_subjective_gen_d21e37.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,41 @@ | ||
import os
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import GaokaoBenchDataset
from mmengine.config import read_base

# Pull in the shared per-subject prompt tables from the sibling config file.
with read_base():
    from .GaokaoBench_prompts import MCQ_prompts, FBQ_prompts

# One dataset entry is emitted per prompt spec, covering the two objective
# (non-subjective) question folders of GAOKAO-BENCH.
GaokaoBench_datasets = []
for folder, prompts in (
    ("Multiple-choice_Questions", MCQ_prompts),
    ("Fill-in-the-blank_Questions", FBQ_prompts),
):
    for p in prompts:
        # Each sample exposes the question text; the gold label sits in "answer".
        reader_cfg = dict(input_columns=["question"], output_column="answer")
        # Zero-shot generation over a plain string template: the
        # subject-specific prefix is glued onto the question.
        infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=p["prefix_prompt"] + "{question}",
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_out_len=1024),
        )
        # The evaluator class is resolved by name from the registry,
        # specialised per question type.
        eval_cfg = dict(
            evaluator=dict(type=f"GaokaoBenchEvaluator_{p['type']}"),
            pred_role="BOT",
        )
        dataset = dict(
            type=GaokaoBenchDataset,
            abbr="GaokaoBench_" + p["keyword"],
            path=os.path.join("data", "GAOKAO-BENCH", "data", folder, p["keyword"] + ".json"),
            reader_cfg=reader_cfg,
            infer_cfg=infer_cfg,
            eval_cfg=eval_cfg,
        )
        GaokaoBench_datasets.append(dataset)
Large diffs are not rendered by default.
Oops, something went wrong.
File renamed without changes.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
38 changes: 38 additions & 0 deletions
38
configs/datasets/TheoremQA/TheoremQA_post_v2_gen_2c2583.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,38 @@ | ||
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TheoremQADataset, TheoremQA_postprocess_v2

# The test split doubles as the only split the reader consumes.
TheoremQA_reader_cfg = {
    "input_columns": ["Question", "Answer_type"],
    "output_column": "Answer",
    "train_split": "test",
}

# Instruction constraining the shape of the model's final answer.
TheoremQA_prompt1 = """You are a mathematician, you are supposed to answer the given question. You need to output the answer in your final sentence like "Therefore, the answer is ...". The answer can only be one of the following forms:
1. a numerical value like 0.1, no symbol and no unit at all.
2. a list of number like [2, 3, 4].
3. True/False.
4. an option like (a), (b), (c), (d)
"""
TheoremQA_prompt2 = "Question: {Question}\nLet's think step by step."

# Zero-shot chain-of-thought generation over the concatenated prompt string.
TheoremQA_infer_cfg = {
    "prompt_template": {
        "type": PromptTemplate,
        "template": TheoremQA_prompt1 + TheoremQA_prompt2,
    },
    "retriever": {"type": ZeroRetriever},
    "inferencer": {"type": GenInferencer, "max_out_len": 512},
}

# NOTE: a fully correct evaluator would need an LLM to extract the answer;
# this matching-based logic also yields a fair number of false negatives.
TheoremQA_eval_cfg = {
    "evaluator": {"type": AccEvaluator},
    "pred_postprocessor": {"type": TheoremQA_postprocess_v2},
}

TheoremQA_datasets = [
    {
        "abbr": "TheoremQA",
        "type": TheoremQADataset,
        "path": "./data/TheoremQA/test.csv",
        "reader_cfg": TheoremQA_reader_cfg,
        "infer_cfg": TheoremQA_infer_cfg,
        "eval_cfg": TheoremQA_eval_cfg,
    }
]
45 changes: 45 additions & 0 deletions
45
configs/datasets/TheoremQA/TheoremQA_post_v2_gen_ef26ca.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,45 @@ | ||
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TheoremQADataset, TheoremQA_postprocess_v2

# The test split doubles as the only split the reader consumes.
TheoremQA_reader_cfg = {
    "input_columns": ["Question", "Answer_type"],
    "output_column": "Answer",
    "train_split": "test",
}

# Instruction constraining the shape of the model's final answer.
TheoremQA_prompt1 = """You are a mathematician, you are supposed to answer the given question. You need to output the answer in your final sentence like "Therefore, the answer is ...". The answer can only be one of the following forms:
1. a numerical value like 0.1, no symbol and no unit at all.
2. a list of number like [2, 3, 4].
3. True/False.
4. an option like (a), (b), (c), (d)
"""
TheoremQA_prompt2 = "Question: {Question}\nLet's think step by step."

# Zero-shot chain-of-thought generation; here the concatenated prompt is
# wrapped in a single HUMAN turn rather than used as a bare string template.
TheoremQA_infer_cfg = {
    "prompt_template": {
        "type": PromptTemplate,
        "template": {
            "round": [
                {
                    "role": "HUMAN",
                    "prompt": TheoremQA_prompt1 + TheoremQA_prompt2,
                },
            ]
        },
    },
    "retriever": {"type": ZeroRetriever},
    "inferencer": {"type": GenInferencer, "max_out_len": 512},
}

# NOTE: a fully correct evaluator would need an LLM to extract the answer;
# this matching-based logic also yields a fair number of false negatives.
TheoremQA_eval_cfg = {
    "evaluator": {"type": AccEvaluator},
    "pred_postprocessor": {"type": TheoremQA_postprocess_v2},
}

TheoremQA_datasets = [
    {
        "abbr": "TheoremQA",
        "type": TheoremQADataset,
        "path": "./data/TheoremQA/test.csv",
        "reader_cfg": TheoremQA_reader_cfg,
        "infer_cfg": TheoremQA_infer_cfg,
        "eval_cfg": TheoremQA_eval_cfg,
    }
]
Oops, something went wrong.