"""
Usage:
    This scripts it to evaluate the classification accuracy/error rate from the embedding extracted
    by gen_audio_embedding.py 
    Example (LID classification)

    PYTHONPATH='.' python examples/wav2vec/eval_speaker_clf_task.py \
            --data /fsx/androstj/exps/lid_voxlingua/infer/atj_xlsr2_100pct_300M_mean_fast_upd_100k_new.npz \
            --task cls --merge mean_logit
"""
import argparse
import logging

import numpy as np
from scipy.special import softmax
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm

log = logging.getLogger(__name__)
log.setLevel(logging.INFO)

def calculate_eer(y_label, y_score):
    # y_label denotes the ground-truth binary labels,
    # y_score denotes the prediction scores.
    from scipy.optimize import brentq
    from sklearn.metrics import roc_curve
    from scipy.interpolate import interp1d

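    # The EER is the operating point where the false-positive rate equals the
    # false-negative rate; brentq finds the FPR at which tpr(fpr) = 1 - fpr.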
    fpr, tpr, thresholds = roc_curve(y_label, y_score, pos_label=1)
    eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
    optimal_threshold = interp1d(fpr, thresholds)(eer)
    return eer, optimal_threshold

def calculate_minDCF(y_label, y_score, p_target=0.01, c_miss=1, c_fa=1):
    # https://github.com/kaldi-asr/kaldi/blob/master/egs/sre08/v1/sid/compute_min_dcf.py
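    # minDCF is the minimum, over all decision thresholds, of the detection
    # cost function (a weighted sum of miss and false-alarm rates), normalized
    # by the best cost achievable by always accepting or always rejecting.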
    from sklearn.metrics import det_curve
    fpr, fnr, thresholds = det_curve(y_label, y_score, pos_label=1)
    min_c_det = float("inf")
    min_c_det_threshold = thresholds[0]
    for i in range(len(fpr)):
        # See Equation (2).  it is a weighted sum of false negative
        # and false positive errors.
        c_det = c_miss * fnr[i] * p_target + c_fa * fpr[i] * (1 - p_target)
        if c_det < min_c_det:
            min_c_det = c_det
            min_c_det_threshold = thresholds[i]
    # See Equations (3) and (4).  Now we normalize the cost.
    c_def = min(c_miss * p_target, c_fa * (1 - p_target))
    min_dcf = min_c_det / c_def
    return min_dcf, min_c_det_threshold


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', help='path to the .npz file (from gen_audio_embedding.py) containing names & latents/logits')
    parser.add_argument('--task', choices=['cls', 'veri', 'cls_voxlingua'])
    parser.add_argument('--merge', choices=['mean_logit', 'first_logit', 'mean_latent_sim', 'first_latent_sim', 'mean_logit_sim', 'first_logit_sim'])
    parser.add_argument('--veri-pair', help='verification pair file; each line: "<1|0> <utt_x> <utt_y>"')
    parser.add_argument('--scaler', type=str, choices=['mean_var'])
    parser.add_argument('--compress-method', choices=['pca'])
    parser.add_argument('--compress-dim', type=int)
    args = parser.parse_args()

    if args.task in ['cls', 'cls_voxlingua']:
        print('| run classification evaluation')
        data = np.load(args.data)
        data_logit = data['logit']
        data_target = data['target']
        data_src_len = data['src_len']
        assert data_logit.shape[0] == data_target.shape[0]
        B = data_logit.shape[0]
        correct = 0
        total = 0
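        # data_logit has shape (B, n_segments, n_classes); softmax over the
        # class axis yields per-segment class probabilities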
        data_prob = softmax(data_logit, axis=2)
        correct_vs_len = np.empty((B, 2))
        for ii in range(B):
            _target = data_target[ii]
            if args.merge == 'mean_logit':
                _prob = np.mean(data_prob[ii], axis=0)
                top_1 = np.argmax(_prob)
            elif args.merge == 'first_logit':
                _prob = data_prob[ii][0]
                top_1 = np.argmax(_prob)
            else:
                raise ValueError(f'unsupported merge method for classification: {args.merge}')
            is_top_1 = (1 if top_1 == _target else 0)
            correct += is_top_1
            total += 1
            _src_len = data_src_len[ii] / 16000  # samples -> seconds at 16 kHz
            correct_vs_len[ii] = [is_top_1, _src_len]

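        # bucket accuracy by utterance duration: <= 5 s vs > 5 s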
        acc = correct / total * 100
        t_5 = correct_vs_len[:, 1] <= 5
        t_20 = correct_vs_len[:, 1] > 5
        c_5 = correct_vs_len[t_5, 0].sum()
        c_20 = correct_vs_len[t_20, 0].sum()
        t_5 = t_5.sum()
        t_20 = t_20.sum()
        acc_5 = c_5 / t_5 * 100
        acc_20 = c_20 / t_20 * 100
        print(f'| acc = {acc:.2f}% -- err = {100-acc:.2f}% -- {correct=} {total=}')
        print(f'| acc 0to5 = {acc_5:.2f}% -- err = {100-acc_5:.2f}% -- {c_5=} {t_5=}')
        print(f'| acc 5to20 = {acc_20:.2f}% -- err = {100-acc_20:.2f}% -- {c_20=} {t_20=}')

    if args.task == 'veri':
        print('| run verification evaluation')
        veri_pairs = []
        with open(args.veri_pair) as ff:
            for fi in ff:
                # each line: "<1|0> <utt_x> <utt_y>"
                a, b, c = fi.split()
                a = int(a)
                veri_pairs.append([a, b, c])
        
        data = np.load(args.data)
        if 'logit' in args.merge:
            data_latent = data['logit']
        elif 'latent' in args.merge:
            data_latent = data['latent']
        else:
            raise ValueError(f'unsupported merge method: {args.merge}')

        data_name = data['name']
        assert len(data_name) == len(data_latent)
        map_name_latent = {}

        from sklearn.pipeline import make_pipeline
        pipe = []
        if args.scaler == 'mean_var':
            print('| apply StandardScaler')
            pipe.append(StandardScaler())

        if args.compress_method == 'pca':
            n_comp = args.compress_dim
            print(f'| apply PCA with {n_comp=}')
            from sklearn.decomposition import PCA
            pipe.append(PCA(n_components=n_comp))
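        # fit the preprocessing on all segments pooled across utterances:
        # flatten (B, n_segments, dim) -> (B * n_segments, dim), transform,
        # then restore the per-utterance segment structure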
        if len(pipe) > 0:
            pipe = make_pipeline(*pipe)
            data_latent_2d = data_latent.reshape(-1, data_latent.shape[-1])
            pipe.fit(data_latent_2d)
            data_latent_2d = pipe.transform(data_latent_2d)
            data_latent = data_latent_2d.reshape(data_latent.shape[0], data_latent.shape[1], -1)

        for ii in range(len(data_name)):
            map_name_latent[data_name[ii]] = data_latent[ii]
        labels = []
        scores = []
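        # cosine_similarity returns an (n_segments_a, n_segments_b) matrix of
        # segment-level similarities; merge it into one scalar score per pair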
        for lbl, pair_a, pair_b in tqdm(veri_pairs):
            labels.append(lbl)
            pair_a = map_name_latent[pair_a]
            pair_b = map_name_latent[pair_b]
            assert pair_a.ndim == pair_b.ndim == 2
            score = cosine_similarity(pair_a, pair_b)
            if args.merge.startswith('mean'):
                score = np.mean(score)
            elif args.merge.startswith('first'):
                score = score[0, 0]
            else:
                raise ValueError(f'unsupported merge method: {args.merge}')
            scores.append(score)
        labels = np.array(labels)
        scores = np.array(scores)
        eer, eer_threshold = calculate_eer(labels, scores)
        minDCF, minDCF_threshold = calculate_minDCF(labels, scores)
        print('=' * 40)
        print(f'| EER = {eer*100:.2f}%\tthreshold = {eer_threshold:.2f}')
        print(f'| minDCF = {minDCF:.2f}\tthreshold = {minDCF_threshold:.2f}')