xuesongyan committed on
Commit 12c65b8 · 1 Parent(s): 0f7e408

Upload app.py

Files changed (1)
  1. app.py +72 -0
app.py ADDED
@@ -0,0 +1,72 @@
+ import gradio as gr
+ import torch
+ import torch.nn.functional as F
+ import numpy
+ import net
+ import argparse
+ from config import set_cfg, cfg
+ from SpeakerNet import *
+ import lossfunction
+ from DatasetLoader import loadWAV
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--config_name", type=str, default="ECAPA_TDNN_data_cfg", help="name of the config to use as the base config")
+ parser.add_argument("--resume", default="train_models/epoch_37_ECAPA_TDNN2.48", type=str, help="checkpoint path to load")
+ args = parser.parse_args()
+ assert args.config_name is not None
+ set_cfg(args.config_name)
+ cfg.replace(vars(args))
+ del args
+
+ # Build the embedding network and its loss head on CPU, then wrap them in SpeakerNet.
+ device = torch.device("cpu")
+ model = getattr(net, cfg.model)().to(device)
+ loss = getattr(lossfunction, cfg.loss)(cfg.nOut, cfg.nClasses).to(device)
+ model = SpeakerNet(model=model, trainfunc=loss, nPerSpeaker=cfg.nPerSpeaker)
+
+ ckpt = torch.load("train_models/epoch_37_ECAPA_TDNN2.48", map_location="cpu")
+ model.load_state_dict(ckpt['model_state_dict'], strict=False)
+ print("Checkpoint loaded!")
+
+ model.eval()
+
+ def SpeakerVerification(path1, path2):
+     # Load both utterances as fixed-length evaluation segments.
+     inp1 = torch.FloatTensor(loadWAV(path1, max_frames=300, evalmode=True))
+     inp2 = torch.FloatTensor(loadWAV(path2, max_frames=300, evalmode=True))
+     # Extract speaker embeddings and L2-normalize them.
+     with torch.no_grad():
+         emb1 = model(inp1).detach().cpu()
+         emb2 = model(inp2).detach().cpu()
+     emb1 = F.normalize(emb1, p=2, dim=1)
+     emb2 = F.normalize(emb2, p=2, dim=1)
+     # Mean pairwise cosine similarity across all segment pairs.
+     dist = F.cosine_similarity(emb1.unsqueeze(-1), emb2.unsqueeze(-1).transpose(0, 2)).numpy()
+     score = numpy.mean(dist)
+     print(score)
+     # Decision threshold: 0.414.
+     if score >= 0.414:
+         output = "Same speaker"
+     else:
+         output = "Different speakers"
+     return output
+
+ inputs = [
+     gr.inputs.Audio(source="upload", type="filepath", label="Speaker #1", optional=False),
+     gr.inputs.Audio(source="upload", type="filepath", label="Speaker #2", optional=False)
+ ]
+
+ examples = [["example/speaker1-1.wav", "example/speaker1-2.wav"],
+             ["example/speaker1-1.wav", "example/speaker2-1.wav"],
+             ["example/speaker2-1.wav", "example/speaker2-1.wav"],
+             ["example/speaker1-2.wav", "example/speaker2-2.wav"]
+             ]
+
+ iface = gr.Interface(fn=SpeakerVerification, inputs=inputs, outputs="text", examples=examples)
+
+ if __name__ == '__main__':
+     # print(SpeakerVerification("example/speaker1-1.wav", "example/speaker1-2.wav"))
+     iface.launch(share=True)
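
For a quick sanity check of the scoring step outside the Space, here is a minimal sketch that reproduces the pairwise cosine scoring on dummy embeddings. The segment count (10) and embedding size (192) are illustrative assumptions rather than values read from the config; only the unsqueeze/transpose broadcasting and the 0.414 threshold mirror app.py above.

import torch
import torch.nn.functional as F

# Dummy stand-ins for model(inp1)/model(inp2): (segments, embedding dim).
# 10 segments and 192 dims are assumed here for illustration only.
emb1 = F.normalize(torch.randn(10, 192), p=2, dim=1)
emb2 = F.normalize(torch.randn(10, 192), p=2, dim=1)

# (10, 192, 1) against (1, 192, 10) broadcasts cosine_similarity over
# dim=1 into a (10, 10) matrix of segment-pair similarities, as in app.py.
dist = F.cosine_similarity(emb1.unsqueeze(-1), emb2.unsqueeze(-1).transpose(0, 2))
score = dist.mean().item()
print("Same speaker" if score >= 0.414 else "Different speakers", score)

Averaging over the full segment-by-segment grid, rather than comparing a single pair of embeddings, presumably makes the score less sensitive to which portions of each recording the evaluation segments were cut from.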