eval_mmu2.py
# ------------------------------------------------------------------------------
# Libraries
# ------------------------------------------------------------------------------
import os
import numpy as np
from glob import glob
from tqdm import tqdm
from random import shuffle
from itertools import repeat
from multiprocessing import Pool
from fnc.extractFeature import extractFeature
from fnc.matching import calHammingDist
# ------------------------------------------------------------------------------
# Parameters
# ------------------------------------------------------------------------------
MMU2_DIR = "../MMU2"
N_IMAGES = 5
eyelashes_thresholds = np.linspace(start=10, stop=250, num=25)
thresholds = np.linspace(start=0.0, stop=1.0, num=100)
N_WORKERS = 4
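# Grid searched below: 25 eyelash-occlusion thresholds (10, 20, ..., 250, in steps
# of 10) for feature extraction, and 100 evenly spaced Hamming-distance decision
# thresholds in [0, 1] for matching. N_IMAGES images are sampled per identity.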
# ------------------------------------------------------------------------------
# Pool function of extracting feature
# ------------------------------------------------------------------------------
def pool_func_extract_feature(args):
    im_filename, eyelashes_thres, use_multiprocess = args
    template, mask, im_filename = extractFeature(
        im_filename=im_filename,
        eyelashes_thres=eyelashes_thres,
        use_multiprocess=use_multiprocess,
    )
    return template, mask, im_filename
# ------------------------------------------------------------------------------
# Pool function of calculating Hamming distance
# ------------------------------------------------------------------------------
def pool_func_calHammingDist(args):
    template1, mask1, template2, mask2 = args
    dist = calHammingDist(template1, mask1, template2, mask2)
    return dist
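# Note (assumption): calHammingDist from fnc.matching is expected to return the
# masked fractional Hamming distance commonly used for iris codes, i.e.
#   HD = ||(template1 XOR template2) AND mask1 AND mask2|| / ||mask1 AND mask2||
# so 0.0 means identical codes over the valid (unmasked) bits and values near
# 0.5 indicate unrelated codes.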
# ------------------------------------------------------------------------------
# Main execution
# ------------------------------------------------------------------------------
# Get identities of MMU2 dataset
identities = glob(os.path.join(MMU2_DIR, "**"))
identities = sorted([os.path.basename(identity) for identity in identities])
n_identities = len(identities)
print("Number of identities:", n_identities)
# Construct a list of image files
image_files = []
for identity in identities:
    if identity == "50":
        continue
    files = glob(os.path.join(MMU2_DIR, identity, "*.*"))
    shuffle(files)
    image_files += files[:N_IMAGES]
n_image_files = len(image_files)
print("Number of image files:", n_image_files)
# Ground truth
ground_truth = np.zeros([n_image_files, n_image_files], dtype=int)
for i in range(ground_truth.shape[0]):
    for j in range(ground_truth.shape[1]):
        if i//N_IMAGES == j//N_IMAGES:
            ground_truth[i, j] = 1
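# With N_IMAGES = 5, image_files holds 5 consecutive images per identity, so
# i//N_IMAGES == j//N_IMAGES exactly when images i and j share an identity and
# ground_truth is block-diagonal with 5x5 blocks of ones (genuine pairs).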
# Evaluate parameters
pools = Pool(processes=N_WORKERS)
best_results = []
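# Feature extraction and matching are farmed out to N_WORKERS processes;
# use_multiprocess=False (passed via repeat(False) below) presumably keeps
# extractFeature single-process inside each worker to avoid nested pools.
# Note: on platforms whose default start method is "spawn" (e.g. Windows),
# creating a Pool at module level like this would need an
# `if __name__ == "__main__":` guard.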
for eye_threshold in tqdm(eyelashes_thresholds, total=len(eyelashes_thresholds)):
    # Extract features
    args = zip(image_files, repeat(eye_threshold), repeat(False))
    features = list(pools.map(pool_func_extract_feature, args))

    # Calculate the distances
    args = []
    for i in range(n_image_files):
        for j in range(n_image_files):
            if i >= j:
                continue
            arg = (features[i][0], features[i][1],
                   features[j][0], features[j][1])
            args.append(arg)
    distances = pools.map(pool_func_calHammingDist, args)
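    # `distances` holds the n*(n-1)/2 upper-triangle (i < j) pair distances,
    # enumerated in row-major order; the full matrix is rebuilt from it below.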
    # Construct a distance matrix
    k = 0
    dist_mat = np.zeros([n_image_files, n_image_files])
    for i in range(n_image_files):
        for j in range(n_image_files):
            if i < j:
                dist_mat[i, j] = distances[k]
                k += 1
            elif i > j:
                dist_mat[i, j] = dist_mat[j, i]
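    # dist_mat is now symmetric; its diagonal is left at 0, so every image
    # trivially matches itself at any decision threshold.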
    # Metrics
    accuracies, precisions, recalls, fscores = [], [], [], []
    for threshold in thresholds:
        decision_map = (dist_mat <= threshold).astype(int)
        accuracy = (decision_map == ground_truth).sum() / ground_truth.size
        precision = (ground_truth*decision_map).sum() / decision_map.sum()
        recall = (ground_truth*decision_map).sum() / ground_truth.sum()
        fscore = 2*precision*recall / (precision+recall)
        accuracies.append(accuracy)
        precisions.append(precision)
        recalls.append(recall)
        fscores.append(fscore)
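    # Note: the metrics above are computed over all n*n entries of the decision
    # matrix, i.e. both orderings of every pair plus the diagonal self-matches
    # are counted.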
    # Save the best result
    best_fscore = max(fscores)
    best_threshold = thresholds[fscores.index(best_fscore)]
    best_results.append((eye_threshold, best_threshold, best_fscore))
# Show the final best result
eye_thresholds = [item[0] for item in best_results]
best_thresholds = [item[1] for item in best_results]
best_fscores = [item[2] for item in best_results]
print("Maximum fscore:", max(best_fscores))
print("Best eye_threshold:", eye_thresholds[best_fscores.index(max(best_fscores))])
print("Best threshold:", best_thresholds[best_fscores.index(max(best_fscores))])