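"""Gradio demo for speaker verification with an ECAPA-TDNN embedding model.

Two uploaded utterances are embedded with a pretrained SpeakerNet model, and
the mean pairwise cosine similarity of their segment embeddings is compared
against a fixed threshold to decide "same speaker" vs. "different speakers".
"""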
import argparse

import gradio as gr
import numpy
import torch
import torch.nn.functional as F

import net
import lossfunction
from config import set_cfg, cfg
from SpeakerNet import *
from DatasetLoader import loadWAV

# Parse command-line arguments and merge them into the global config.
parser = argparse.ArgumentParser()
parser.add_argument("--config_name", type=str, default="ECAPA_TDNN_data_cfg", help="name of the config to use as the base configuration")
parser.add_argument("--resume", default="train_models/epoch_37_ECAPA_TDNN2.48", type=str, help="path of the checkpoint to load")
args = parser.parse_args()
assert args.config_name is not None
set_cfg(args.config_name)
cfg.replace(vars(args))
del args

# Build the backbone and the loss head named in the config, then wrap both in SpeakerNet.
device = torch.device("cpu")
model = getattr(net, cfg.model)().to(device)
loss = getattr(lossfunction, cfg.loss)(cfg.nOut, cfg.nClasses).to(device)
model = SpeakerNet(model=model, trainfunc=loss, nPerSpeaker=cfg.nPerSpeaker)

# Load weights from the checkpoint given by --resume. strict=False tolerates
# missing or unexpected keys (e.g. the training-only loss head).
ckpt = torch.load(cfg.resume, map_location="cpu")
model.load_state_dict(ckpt['model_state_dict'], strict=False)
print("Checkpoint loaded!")

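# Inference only: disable dropout and use running BatchNorm statistics.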
model.eval()

def SpeakerVerification(path1, path2):
    # Load each utterance as a stack of fixed-length evaluation segments.
    inp1 = torch.FloatTensor(loadWAV(path1, max_frames=300, evalmode=True))
    inp2 = torch.FloatTensor(loadWAV(path2, max_frames=300, evalmode=True))
    with torch.no_grad():
        emb1 = model(inp1).detach().cpu()
        emb2 = model(inp2).detach().cpu()
    # L2-normalise, then score with the mean cosine similarity over all
    # segment pairs: (N, D, 1) against (1, D, N) gives an (N, N) matrix.
    emb1 = F.normalize(emb1, p=2, dim=1)
    emb2 = F.normalize(emb2, p=2, dim=1)
    dist = F.cosine_similarity(emb1.unsqueeze(-1), emb2.unsqueeze(-1).transpose(0, 2)).numpy()
    score = numpy.mean(dist)
    print(score)
    # Fixed decision threshold (0.414, tuned beforehand).
    if score >= 0.414:
        return "Same speaker"
    return "Different speakers"

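# Two audio uploads; type="filepath" passes the saved file path to the
# function (legacy gr.inputs API from Gradio 2.x).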
inputs = [
    gr.inputs.Audio(source="upload", type="filepath", label="Speaker #1", optional=False),
    gr.inputs.Audio(source="upload", type="filepath", label="Speaker #2", optional=False)
]


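# Example pairs bundled with the repo, mixing same-speaker and
# different-speaker clips.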
examples = [["example/speaker1-1.wav", "example/speaker1-2.wav"],
            ["example/speaker1-1.wav", "example/speaker2-1.wav"],
            ["example/speaker2-1.wav", "example/speaker2-2.wav"],
            ["example/speaker1-2.wav", "example/speaker2-2.wav"],
            ["example/speaker3-1.wav", "example/speaker3-2.wav"],
            ["example/speaker3-1.wav", "example/speaker4-1.wav"],
            ["example/speaker4-1.wav", "example/speaker4-2.wav"],
            ["example/speaker3-2.wav", "example/speaker4-2.wav"],
            ["example/speaker4-1.wav", "example/speaker5-2.wav"],
            ]

iface = gr.Interface(fn=SpeakerVerification, inputs=inputs, outputs="text", examples=examples)

if __name__ == '__main__':
    # Quick sanity check without the UI:
    # print(SpeakerVerification("example/speaker1-1.wav", "example/speaker1-2.wav"))
    iface.launch(share=True)