evaluation.py
import argparse
import os
import time
from glob import glob

import numpy as np
import cv2
import progressbar
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score


def seg_multi_class(test_mask_dir, truth_mask_dir, weighting):
    """Evaluate predicted masks against ground-truth masks using sklearn metrics,
    supporting multi-class labels via the given averaging scheme."""
    test_masks = glob(os.path.join(test_mask_dir, '*.png'))
    test_masks.extend(glob(os.path.join(test_mask_dir, '*.jpg')))
    truth_masks = glob(os.path.join(truth_mask_dir, '*.png'))
    truth_masks.extend(glob(os.path.join(truth_mask_dir, '*.jpg')))
    print('[INFO] Performing segmentation evaluation...')
    print('[INFO] %d images to process' % len(test_masks))
    bar = progressbar.ProgressBar(maxval=len(test_masks),
                                  widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()
    precision = 0.0
    recall = 0.0
    accuracy = 0.0
    f1 = 0.0
    num_evals = 0
    for i, test_img in enumerate(test_masks):
        bar.update(i + 1)
        pred_im = cv2.imread(test_img, 0)
        for truth_img in truth_masks:
            # Match prediction and ground truth by file name (extension ignored)
            if os.path.splitext(os.path.basename(truth_img))[0] == os.path.splitext(os.path.basename(test_img))[0]:
                gt_im = cv2.imread(truth_img, 0)
                if pred_im.shape != gt_im.shape:
                    raise TypeError("Images must be matching sizes")
                if weighting == 'binary':
                    pred_im[pred_im > 0] = 1
                    gt_im[gt_im > 0] = 1
                precision += precision_score(gt_im.ravel(), pred_im.ravel(), pos_label=1, average=weighting)
                recall += recall_score(gt_im.ravel(), pred_im.ravel(), pos_label=1, average=weighting)
                f1 += f1_score(gt_im.ravel(), pred_im.ravel(), pos_label=1, average=weighting)
                accuracy += accuracy_score(gt_im.ravel(), pred_im.ravel())
                num_evals += 1
    bar.finish()
    if num_evals == 0:
        return 0.0, 0.0, 0.0, 0.0
    return precision / num_evals, recall / num_evals, (accuracy / num_evals) * 100, f1 / num_evals


def seg_binary(test_mask_dir, truth_mask_dir):
    """Evaluate binary (foreground/background) masks with per-image precision,
    recall, accuracy and F1 computed from the confusion-matrix counts."""
    test_masks = glob(os.path.join(test_mask_dir, '*.png'))
    test_masks.extend(glob(os.path.join(test_mask_dir, '*.jpg')))
    truth_masks = glob(os.path.join(truth_mask_dir, '*.png'))
    truth_masks.extend(glob(os.path.join(truth_mask_dir, '*.jpg')))
    print('[INFO] Performing segmentation evaluation...')
    print('[INFO] %d images to process' % len(test_masks))
    bar = progressbar.ProgressBar(maxval=len(test_masks),
                                  widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()
    num_evals = 0
    overall_pre = 0.0
    overall_rec = 0.0
    overall_acc = 0.0
    overall_f1 = 0.0
    for i, test_img in enumerate(test_masks):
        bar.update(i + 1)
        tp = 0
        tn = 0
        fp = 0
        fn = 0
        pred_im = cv2.imread(test_img, 0)
        pred_im[pred_im > 0] = 255
        for truth_img in truth_masks:
            # Match prediction and ground truth by file name (extension ignored)
            if os.path.splitext(os.path.basename(truth_img))[0] == os.path.splitext(os.path.basename(test_img))[0]:
                gt_im = cv2.imread(truth_img, 0)
                gt_im[gt_im > 0] = 255
                if pred_im.shape != gt_im.shape:
                    raise TypeError("Images must be matching sizes")
                tp = float(np.count_nonzero((pred_im == 255) & (gt_im == 255)))
                tn = float(np.count_nonzero((pred_im == 0) & (gt_im == 0)))
                fp = float(np.count_nonzero((pred_im == 255) & (gt_im == 0)))
                fn = float(np.count_nonzero((pred_im == 0) & (gt_im == 255)))
        if tp != 0 and tn != 0:
            accuracy = (tp + tn) / (tp + fp + fn + tn)
            precision = tp / (tp + fp)
            recall = tp / (tp + fn)
            f1 = 2 * ((precision * recall) / (precision + recall))
            overall_pre += precision
            overall_rec += recall
            overall_acc += accuracy
            overall_f1 += f1
            num_evals += 1
        # Images with no match, no true positives, or no true negatives are skipped
    bar.finish()
    if num_evals != 0:
        overall_pre = overall_pre / num_evals
        overall_rec = overall_rec / num_evals
        overall_acc = overall_acc / num_evals
        overall_f1 = overall_f1 / num_evals
    else:
        overall_pre = 0
        overall_rec = 0
        overall_acc = 0
        overall_f1 = 0
    return overall_pre, overall_rec, overall_acc * 100, overall_f1


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--test_images", help="Path to test image directory", required=True)
    parser.add_argument("-g", "--ground_truth_images", help="Path to ground truth image directory", required=True)
    parser.add_argument("-m", "--multi_class", help="Multiple classes present", default=False, action="store_true")
    args = parser.parse_args()
    if not os.path.exists(args.test_images):
        raise ValueError("Test image directory does not exist")
    if not os.path.exists(args.ground_truth_images):
        raise ValueError("Ground truth image directory does not exist")
    print('[INFO] Evaluating Results...')
    test_mask_dir = args.test_images
    truth_mask_dir = args.ground_truth_images
    multi_class = args.multi_class
    start = time.time()
    if not multi_class:
        precision, recall, accuracy, f1 = seg_binary(test_mask_dir=test_mask_dir, truth_mask_dir=truth_mask_dir)
    else:
        precision, recall, accuracy, f1 = seg_multi_class(test_mask_dir=test_mask_dir, truth_mask_dir=truth_mask_dir, weighting='weighted')
    print('--------------------------------------------------------')
    print('[RESULTS] PRECISION: %.4f' % precision)
    print('[RESULTS] RECALL: %.4f' % recall)
    print('[RESULTS] ACCURACY: %.4f %%' % accuracy)
    print('[RESULTS] F1 VALUE: %.4f' % f1)
    print('--------------------------------------------------------')
    print('Processing Time %.2f seconds' % (time.time() - start))


if __name__ == '__main__':
    main()
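
A minimal usage sketch follows; the directory names are placeholders, not paths from this repository. It shows how the evaluation functions can be called directly from Python in addition to the command-line interface defined in main().

# Hypothetical usage sketch: 'predictions/' and 'ground_truth/' are placeholder paths.
# Equivalent CLI call: python evaluation.py -i predictions/ -g ground_truth/
from evaluation import seg_binary

precision, recall, accuracy, f1 = seg_binary(test_mask_dir='predictions/',
                                             truth_mask_dir='ground_truth/')
print('Precision %.4f | Recall %.4f | Accuracy %.2f%% | F1 %.4f' % (precision, recall, accuracy, f1))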