-
Notifications
You must be signed in to change notification settings - Fork 27
/
metrics.py
85 lines (64 loc) · 3.32 KB
/
metrics.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
from sklearn import metrics
from skimage import measure
import cv2
import numpy as np
import pandas as pd
def compute_best_pr_re(anomaly_ground_truth_labels, anomaly_prediction_weights):
    """Find the decision threshold that maximizes F1 on the PR curve.

    Args:
        anomaly_ground_truth_labels: binary ground-truth labels (1 = anomalous).
        anomaly_prediction_weights: per-sample anomaly scores.

    Returns:
        Tuple ``(best_threshold, best_precision, best_recall)`` taken at the
        point of maximum F1 score.
    """
    precision, recall, thresholds = metrics.precision_recall_curve(
        anomaly_ground_truth_labels, anomaly_prediction_weights
    )
    # Guard the 0/0 case: where precision + recall == 0, define F1 as 0.
    # (The original formula produced NaN there, and np.argmax over an array
    # containing NaN returns the NaN's index, corrupting the result.)
    denom = precision + recall
    f1_scores = np.divide(
        2 * precision * recall,
        denom,
        out=np.zeros_like(denom),
        where=denom > 0,
    )
    # sklearn returns `thresholds` one element shorter than precision/recall
    # (the final curve point has no threshold), so restrict the argmax to
    # indices that have a corresponding threshold.
    best_index = np.argmax(f1_scores[: len(thresholds)])
    best_threshold = thresholds[best_index]
    best_precision = precision[best_index]
    best_recall = recall[best_index]
    print(best_threshold, best_precision, best_recall)
    return best_threshold, best_precision, best_recall
def compute_imagewise_retrieval_metrics(anomaly_prediction_weights, anomaly_ground_truth_labels, path='training'):
    """Compute image-level retrieval metrics.

    Args:
        anomaly_prediction_weights: per-image anomaly scores.
        anomaly_ground_truth_labels: binary image labels (1 = anomalous).
        path: phase tag; when equal to ``'training'`` the average-precision
            computation is skipped and reported as 0.0.

    Returns:
        Dict with keys ``"auroc"`` and ``"ap"``.
    """
    auroc = metrics.roc_auc_score(
        anomaly_ground_truth_labels, anomaly_prediction_weights
    )
    if path == 'training':
        ap = 0.
    else:
        ap = metrics.average_precision_score(
            anomaly_ground_truth_labels, anomaly_prediction_weights
        )
    return {"auroc": auroc, "ap": ap}
def compute_pixelwise_retrieval_metrics(anomaly_segmentations, ground_truth_masks, path='train'):
    """Compute pixel-level retrieval metrics over flattened segmentations.

    Args:
        anomaly_segmentations: array or list of per-pixel anomaly score maps.
        ground_truth_masks: array or list of binary ground-truth masks.
        path: phase tag; AP is skipped (0.0) only when it equals 'training'.

    Returns:
        Dict with keys ``"auroc"`` and ``"ap"``.

    NOTE(review): the default ``path='train'`` never equals the ``'training'``
    sentinel checked below, so AP is always computed under the default —
    confirm whether that mismatch with the imagewise variant is intended.
    """
    if isinstance(anomaly_segmentations, list):
        anomaly_segmentations = np.stack(anomaly_segmentations)
    if isinstance(ground_truth_masks, list):
        ground_truth_masks = np.stack(ground_truth_masks)

    # Score every pixel independently: flatten both maps to 1-D.
    scores = anomaly_segmentations.ravel()
    labels = ground_truth_masks.ravel().astype(int)

    auroc = metrics.roc_auc_score(labels, scores)
    if path == 'training':
        ap = 0.
    else:
        ap = metrics.average_precision_score(labels, scores)
    return {"auroc": auroc, "ap": ap}
def compute_pro(masks, amaps, num_th=200):
    """Compute the PRO (Per-Region Overlap) AUC score.

    Sweeps ``num_th`` thresholds across the anomaly-map value range. At each
    threshold the binarized (and slightly dilated) prediction is scored by the
    mean per-region overlap against connected components of the ground-truth
    masks, paired with the pixel false-positive rate. The result is the area
    under the PRO-vs-FPR curve, restricted to FPR < 0.3 and with the FPR axis
    renormalized to [0, 1].

    Args:
        masks: binary ground-truth masks, shape (N, H, W).
        amaps: anomaly score maps, same shape as ``masks``.
        num_th: number of thresholds in the sweep.

    Returns:
        The PRO-AUC as a float.
    """
    binary_amaps = np.zeros_like(amaps, dtype=bool)
    min_th = amaps.min()
    max_th = amaps.max()
    delta = (max_th - min_th) / num_th
    # 5x5 rectangular kernel: the mild dilation tolerates small boundary
    # misalignments between prediction and annotation.
    k = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    # Loop-invariant: the background mask does not depend on the threshold
    # (the original recomputed it on every iteration).
    inverse_masks = 1 - masks
    background_area = inverse_masks.sum()
    # Collect one row per threshold and build the DataFrame once at the end.
    # The original re-concatenated a growing DataFrame inside the loop, which
    # is quadratic and relies on a deprecated pandas pattern (concat with an
    # all-empty frame).
    rows = []
    for th in np.arange(min_th, max_th, delta):
        binary_amaps[amaps <= th] = 0
        binary_amaps[amaps > th] = 1
        pros = []
        for binary_amap, mask in zip(binary_amaps, masks):
            binary_amap = cv2.dilate(binary_amap.astype(np.uint8), k)
            for region in measure.regionprops(measure.label(mask)):
                axes0_ids = region.coords[:, 0]
                axes1_ids = region.coords[:, 1]
                # Fraction of this ground-truth region covered by the
                # thresholded prediction.
                tp_pixels = binary_amap[axes0_ids, axes1_ids].sum()
                pros.append(tp_pixels / region.area)
        fp_pixels = np.logical_and(inverse_masks, binary_amaps).sum()
        fpr = fp_pixels / background_area
        rows.append({"pro": np.mean(pros), "fpr": fpr, "threshold": th})
    df = pd.DataFrame(rows, columns=["pro", "fpr", "threshold"])
    # Standard PRO protocol: integrate only up to 30% FPR, then rescale the
    # FPR axis to [0, 1] (the epsilon guards a zero-width range).
    df = df[df["fpr"] < 0.3]
    df["fpr"] = (df["fpr"] - df["fpr"].min()) / (df["fpr"].max() - df["fpr"].min() + 1e-10)
    pro_auc = metrics.auc(df["fpr"], df["pro"])
    return pro_auc