-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy patheval.py
105 lines (81 loc) · 3.48 KB
/
eval.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
import argparse
import os
import numpy as np
from sklearn.metrics import precision_recall_curve, average_precision_score
from config import load_yaml_file, ROOT_DIR
def parse_config():
    """Parse command-line options and return the loaded YAML configuration.

    Recognizes a single option, ``--cfg_file`` (default ``cfg_kitti.yaml``);
    swap the default to ``cfg_nuscenes.yaml`` to evaluate on nuScenes.
    """
    arg_parser = argparse.ArgumentParser(description='arg parser')
    arg_parser.add_argument('--cfg_file', type=str, default='cfg_kitti.yaml',
                            help='specify the config for training')
    parsed = arg_parser.parse_args()
    return load_yaml_file(parsed.cfg_file)
cfg=parse_config()
def cal_best_PRF(y_true, probas_pred):
    """Find the best F1 operating point on the precision-recall curve.

    Sweeps all thresholds and returns the precision, recall and F1 at the
    threshold maximizing F1, together with the average precision (AUPR).

    :param y_true: binary ground-truth labels, shape (n_samples,)
    :param probas_pred: predicted scores/probabilities, shape (n_samples,)
    :return: (precision, recall, f1, aupr, best_threshold) at the best-F1 point
    """
    precisions, recalls, thresholds = precision_recall_curve(
        y_true, probas_pred)
    # precision + recall can be 0 at some points; silence the divide warning
    # and replace the resulting NaNs with 0 so argmax is well-defined.
    with np.errstate(divide='ignore', invalid='ignore'):
        f1s = (2 * precisions * recalls) / (precisions + recalls)
    f1s[np.isnan(f1s)] = 0
    best_index = int(np.argmax(f1s))
    # sklearn contract: len(thresholds) == len(precisions) - 1; the final
    # (precision=1, recall=0) point has no threshold, so clamp defensively.
    best_t = thresholds[min(best_index, len(thresholds) - 1)]
    aupr = average_precision_score(y_true, probas_pred)
    return precisions[best_index], recalls[best_index], f1s[best_index], aupr, best_t
# Select the dataset-specific label reader based on the loaded config.
# Both modules expose a get_label(path) function used by the loop below.
if cfg.datasetname == 'kitti':
    from utils_kitti import get_label
    # resPath = os.path.join(ROOT_DIR, f'result_kitti.csv')
else:
    from nuscenes_related.utils_nuscenes import get_label
# Directory holding one .npy file of per-box anomaly scores per timestamp.
boxAScore_path = os.path.join(cfg.dataPath, 'boxAScore')
# Alternative score source (scores computed without tracking):
# boxAScore_path = os.path.join(cfg.dataPath, 'boxAScore_no_Tracking')
print("Evaluating..")
timestamp_list = [os.path.splitext(i)[0] for i in os.listdir(boxAScore_path) if i.endswith('.npy')]

boxAScoreRes = []    # per-box anomaly scores, accumulated across all timestamps
box_labelRes = []    # per-box ground-truth labels (binary; 1 appears to mark anomalous boxes — verify against get_label)
type_list = []       # per-box object class names (e.g. Car, Pedestrian)
timestamp_index = [] # timestamp each box came from, for tracing outliers

for timestamp in timestamp_list:
    boxAScore = np.load(os.path.join(boxAScore_path, timestamp + '.npy'))
    box_list = get_label(os.path.join(cfg.dataPath, 'label_add', timestamp + '.txt'))
    box_label = np.array([box['ground_truth_label'] for box in box_list])
    obj_names = np.array([box['name'] for box in box_list])
    n_boxes = len(box_label)
    # Scores may have extra trailing entries; keep one score per labelled box.
    boxAScoreRes.append(boxAScore[:n_boxes])
    timestamp_index.append(np.full((n_boxes,), timestamp))
    box_labelRes.append(box_label)
    type_list.append(obj_names)

boxAScoreRes = np.concatenate(boxAScoreRes)
box_labelRes = np.concatenate(box_labelRes).astype(np.int32)
timestamp_index = np.concatenate(timestamp_index)

# Debug aid: list the timestamps of the worst-ranked boxes in each class
# (label 0 with a high anomaly score, and label 1 with a low one).
tt = boxAScoreRes[box_labelRes == 0]
timestamp_index_tt = timestamp_index[box_labelRes == 0]
ff = boxAScoreRes[box_labelRes == 1]
timestamp_index_ff = timestamp_index[box_labelRes == 1]
np.set_printoptions(threshold=1000000)
print('label_0,but AS high:')
print('\',\''.join(timestamp_index_tt[np.argsort(tt)[::-1]]))
print('label_1,but AS low:')
print('\',\''.join(list(timestamp_index_ff[np.argsort(ff)])))

# Overall metrics across every box.
type_list = np.concatenate(type_list)
Precision, Recall, F1, AP, best_t = cal_best_PRF(box_labelRes, boxAScoreRes)
print("Precision:{}, Recall:{}, F-score:{}, AP:{}, best_thre:{}".format(Precision, Recall, F1, AP, best_t))

# Per-class metrics; sorted() makes the output order deterministic across runs.
APs = []
F1s = []
for obj_type in sorted(set(type_list)):
    box_labelRes_one = box_labelRes[type_list == obj_type]
    boxAScoreRes_one = boxAScoreRes[type_list == obj_type]
    print(obj_type)
    Precision, Recall, F1, AP, best_t = cal_best_PRF(box_labelRes_one, boxAScoreRes_one)
    print("Precision:{}, Recall:{}, F-score:{}, AP:{}, best_thre:{}".format(Precision, Recall, F1, AP, best_t))
    APs.append(AP)
    F1s.append(F1)
    print()

mAP = np.array(APs).mean()
mF1 = np.array(F1s).mean()
print('mF1:{}; mAP:{}'.format(mF1, mAP))