Evaluation.py
import argparse
import json
import os
import numpy as np
import torch
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, precision_score, recall_score, jaccard_score
from sklearn.preprocessing import MultiLabelBinarizer
from model.Utils import replicability, Config, hamming_score, rounder
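# This script scores model predictions against gold labels for two tasks:
# evaluation_AP handles multi-label action prediction (AP), and
# evaluation_SIP handles binary initiative prediction (SIP). These task-name
# expansions are inferred from the function names and label fields below,
# not stated explicitly in the file.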
def evaluation_AP(prediction_path=None, label_path=None, mlb=None):
    prediction_list = []
    label_list = []
    id2label = {}
    # Binarize the gold system actions for every example in the label file.
    conversations = torch.load(label_path)
    for conversation in conversations:
        for example in conversation:
            id2label[example["example_id"]] = list(mlb.fit_transform([example["system_action"]])[0])
    # Each prediction line is "example_id\taction_1,action_2,...".
    # A line with no tab-separated prediction means no action was predicted.
    with open(prediction_path, 'r') as r:
        for line in r:
            fields = line.rstrip().split("\t")
            if len(fields) == 1:
                example_id, actions = fields[0], []
            else:
                example_id, prediction = fields
                actions = prediction.split(",")
            # With a fixed `classes` list, fit_transform is equivalent to transform.
            prediction_list.append(list(mlb.fit_transform([actions])[0]))
            label_list.append(id2label[example_id])
    assert len(id2label) == len(prediction_list)
    # Macro-averaged metrics over action labels, plus exact-match and
    # Hamming accuracy over whole predicted action sets.
    f1 = f1_score(label_list, prediction_list, average="macro")
    precision = precision_score(label_list, prediction_list, average="macro")
    recall = recall_score(label_list, prediction_list, average="macro")
    acc_exact = accuracy_score(label_list, prediction_list)
    acc_hamming = hamming_score(np.array(label_list), np.array(prediction_list))
    jaccard = jaccard_score(np.array(label_list), np.array(prediction_list), average="samples")
    precision_detail = precision_score(label_list, prediction_list, average=None)
    recall_detail = recall_score(label_list, prediction_list, average=None)
    # Number of actions predicted per turn, for the summary statistics below.
    prediction_sys_action_num_all_turns = [sum(prediction) for prediction in prediction_list]
    result_dict = {
        "f1": rounder(f1),
        "p": rounder(precision),
        "r": rounder(recall),
        "jaccard": rounder(jaccard),
        "acc_hamming": rounder(acc_hamming),
        "acc_exact": rounder(acc_exact),
        "aver_sys_action_num": sum(prediction_sys_action_num_all_turns) / len(prediction_sys_action_num_all_turns),
        "min_sys_action_num": min(prediction_sys_action_num_all_turns),
        "max_sys_action_num": max(prediction_sys_action_num_all_turns),
        "p_per_label": [rounder(i) for i in precision_detail],
        "r_per_label": [rounder(i) for i in recall_detail],
    }
    print(result_dict)
    return result_dict
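# A minimal sketch of how evaluation_AP is meant to be called (paths and the
# action inventory here are hypothetical; the real classes come from
# Config(args).action). The prediction file holds one "example_id\ta1,a2"
# line per turn, matching the parser above:
#
#   mlb = MultiLabelBinarizer(classes=["CLARIFY", "ANSWER", "RECOMMEND"])
#   evaluation_AP("runs/AP/test.3.txt", "data/WISE/test.pkl", mlb)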
def evaluation_SIP(prediction_path=None, label_path=None):
    prediction_list = []
    label_list = []
    id2label = {}
    # Collect the gold initiative label for every example in the label file.
    conversations = torch.load(label_path)
    for conversation in conversations:
        for example in conversation:
            id2label[example["example_id"]] = example["system_I_label"]
    # Each prediction line is "example_id\tprediction".
    with open(prediction_path, 'r') as r:
        for line in r:
            example_id, prediction = line.rstrip().split("\t")
            prediction_list.append(prediction)
            label_list.append(id2label[example_id])
    assert len(id2label) == len(prediction_list)
    # Binarize: 1 for "Initiative", 0 for anything else.
    label_list = [1 if i == "Initiative" else 0 for i in label_list]
    prediction_list = [1 if i == "Initiative" else 0 for i in prediction_list]
    acc = accuracy_score(label_list, prediction_list)
    # Per-class accuracy (i.e., per-class recall) from the confusion matrix:
    # diagonal entries are hits, row sums are class totals.
    matrix = confusion_matrix(label_list, prediction_list, labels=[0, 1])
    acc_per_label = matrix.diagonal() / matrix.sum(axis=1)
    total_num = matrix.sum(axis=1).tolist()
    hit_num = matrix.diagonal().tolist()
    f1 = f1_score(label_list, prediction_list, average="macro")
    precision = precision_score(label_list, prediction_list, average="macro")
    recall = recall_score(label_list, prediction_list, average="macro")
    precision_detail = precision_score(label_list, prediction_list, average=None)
    recall_detail = recall_score(label_list, prediction_list, average=None)
    result_dict = {
        "f1": rounder(f1),
        "p": rounder(precision),
        "r": rounder(recall),
        "acc": rounder(acc),
        "acc_per_label": [rounder(i) for i in acc_per_label],
        "total_num": total_num,
        "hit_num": hit_num,
        "p_per_label": [rounder(i) for i in precision_detail],
        "r_per_label": [rounder(i) for i in recall_detail],
    }
    print(result_dict)
    return result_dict
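# A minimal sketch of how evaluation_SIP is meant to be called (paths are
# hypothetical). Any label other than the literal string "Initiative" is
# counted as class 0:
#
#   evaluation_SIP("runs/SIP/test.3.txt", "data/MSDialog/test.pkl")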
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--prediction_path", type=str)
    parser.add_argument("--label_path", type=str)
    parser.add_argument("--epoch_num", type=int, default=20)
    args = parser.parse_args()
    # Infer the dataset from the label file path.
    if "WISE" in args.label_path:
        args.dataset = "WISE"
    elif "MSDialog" in args.label_path:
        args.dataset = "MSDialog"
    elif "ClariQ" in args.label_path:
        args.dataset = "ClariQ"
    else:
        raise NotImplementedError
    # Infer the split from the label file path as well.
    if "train" in args.label_path:
        args.dataset_type = "train"
    elif "valid" in args.label_path:
        args.dataset_type = "valid"
    elif "test" in args.label_path:
        args.dataset_type = "test"
    else:
        raise NotImplementedError
    config = Config(args)
    mlb = MultiLabelBinarizer(classes=config.action)
    # Evaluate every epoch whose prediction file exists, appending each
    # epoch's result dict to a single result file for the split.
    for epoch_id in range(0, args.epoch_num + 1):
        prediction_path_ = os.path.join(args.prediction_path, f"{args.dataset_type}.{epoch_id}.txt")
        if os.path.exists(prediction_path_):
            print(f"Start to evaluate {epoch_id}")
            if "AP" in args.prediction_path:
                result_dict = evaluation_AP(prediction_path_, args.label_path, mlb)
            else:
                result_dict = evaluation_SIP(prediction_path_, args.label_path)
            with open(os.path.join(args.prediction_path, f"result.{args.dataset_type}.txt"), 'a+', encoding='utf-8') as w:
                w.write(str(epoch_id) + ": " + str(result_dict) + os.linesep)
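# Example invocation (paths are hypothetical; "AP" in --prediction_path
# selects action-prediction scoring, anything else falls back to SIP):
#
#   python Evaluation.py \
#       --prediction_path runs/WISE/AP \
#       --label_path data/WISE/test.pkl \
#       --epoch_num 20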