CongBang committed
Commit e3c2b9c · verified · 1 Parent(s): bcb4248

Upload folder using huggingface_hub
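
For context, a commit message like this is typically produced by the huggingface_hub upload_folder API. A minimal sketch of the kind of call that could have created this commit (the local path and repo id below are illustrative placeholders, not taken from this page):

from huggingface_hub import HfApi

api = HfApi()
# Push an entire local folder to the Hub as a single commit.
# folder_path and repo_id are hypothetical values for illustration only.
api.upload_folder(
    folder_path="./male_vie_tts",
    repo_id="CongBang/male_vie_tts",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)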

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +997 -0
  2. Attention.py +325 -0
  3. __pycache__/Attention.cpython-310.pyc +0 -0
  4. __pycache__/attentions.cpython-310.pyc +0 -0
  5. __pycache__/commons.cpython-310.pyc +0 -0
  6. __pycache__/data_utils.cpython-310.pyc +0 -0
  7. __pycache__/mel_processing.cpython-310.pyc +0 -0
  8. __pycache__/models_mel_style.cpython-310.pyc +0 -0
  9. __pycache__/modules.cpython-310.pyc +0 -0
  10. __pycache__/transforms.cpython-310.pyc +0 -0
  11. __pycache__/utils.cpython-310.pyc +0 -0
  12. app_gradio.py +150 -0
  13. attentions.py +303 -0
  14. commons.py +161 -0
  15. configs/bert_1.pt +3 -0
  16. configs/bert_3.pt +3 -0
  17. configs/bert_5.pt +3 -0
  18. configs/step_1000000.t7 +3 -0
  19. configs/vie_bert.yml +30 -0
  20. configs/vn_base.json +52 -0
  21. data_utils.py +634 -0
  22. infer_result/test_0.wav +3 -0
  23. infer_result/test_1.wav +3 -0
  24. infer_result/test_2.wav +3 -0
  25. infer_result/test_3.wav +3 -0
  26. logs/large_audio/D_504000.pth +3 -0
  27. logs/large_audio/G_504000.pth +3 -0
  28. logs/male_vie/D_0.pth +3 -0
  29. logs/male_vie/D_1500.pth +3 -0
  30. logs/male_vie/G_0.pth +3 -0
  31. logs/male_vie/G_1500.pth +3 -0
  32. logs/male_vie/G_20000.pth +3 -0
  33. logs/male_vie/config.json +52 -0
  34. logs/male_vie/eval/events.out.tfevents.1710755437.HungVo.15112.1 +3 -0
  35. logs/male_vie/eval/events.out.tfevents.1710755461.HungVo.19504.1 +3 -0
  36. logs/male_vie/eval/events.out.tfevents.1710755705.HungVo.1052.1 +3 -0
  37. logs/male_vie/eval/events.out.tfevents.1710756795.HungVo.1832.1 +3 -0
  38. logs/male_vie/eval/events.out.tfevents.1710756989.HungVo.1676.1 +3 -0
  39. logs/male_vie/eval/events.out.tfevents.1710764452.HungVo.23912.1 +3 -0
  40. logs/male_vie/events.out.tfevents.1710669409.HungVo.3648.0 +3 -0
  41. logs/male_vie/events.out.tfevents.1710755437.HungVo.15112.0 +3 -0
  42. logs/male_vie/events.out.tfevents.1710755461.HungVo.19504.0 +3 -0
  43. logs/male_vie/events.out.tfevents.1710755705.HungVo.1052.0 +3 -0
  44. logs/male_vie/events.out.tfevents.1710756795.HungVo.1832.0 +3 -0
  45. logs/male_vie/events.out.tfevents.1710756989.HungVo.1676.0 +3 -0
  46. logs/male_vie/events.out.tfevents.1710764452.HungVo.23912.0 +3 -0
  47. logs/male_vie/train.log +167 -0
  48. mel_processing.py +112 -0
  49. models_mel_style.py +991 -0
  50. modules.py +543 -0
.gitattributes CHANGED
@@ -33,3 +33,1000 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ configs/step_1000000.t7 filter=lfs diff=lfs merge=lfs -text
+ infer_result/test_0.wav filter=lfs diff=lfs merge=lfs -text
+ infer_result/test_1.wav filter=lfs diff=lfs merge=lfs -text
+ infer_result/test_2.wav filter=lfs diff=lfs merge=lfs -text
+ infer_result/test_3.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00000.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00001.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00002.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00003.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00004.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00005.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00006.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00007.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00008.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00009.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00010.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00011.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00012.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00013.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00014.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00015.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00016.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00017.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00018.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00019.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00021.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00022.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00023.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00024.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00025.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00026.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00027.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00028.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00029.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00030.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00031.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00033.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00034.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00035.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00036.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00037.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00038.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00039.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00040.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00041.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00042.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00043.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00044.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00045.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00046.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00047.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00048.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00049.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00050.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00051.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00052.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00053.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00054.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00055.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00056.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00057.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00058.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00059.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00060.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00061.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00062.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00063.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00064.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00065.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00066.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00067.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00068.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00069.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00070.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00071.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00072.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00073.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00074.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00075.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00076.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00077.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00078.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00079.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00080.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00081.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00082.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00083.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00084.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00085.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00086.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00087.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00088.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00089.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00090.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00091.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00092.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00093.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00094.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00096.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00097.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00098.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00099.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00100.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00101.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00102.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00103.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00104.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00105.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00106.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00107.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00108.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00109.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00110.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00111.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00112.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00113.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00114.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00115.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00116.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00117.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00118.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00119.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00120.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00121.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00122.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00123.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00124.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00125.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00126.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00127.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00128.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00129.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00130.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00131.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00132.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00133.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00134.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00135.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00136.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00137.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00138.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00139.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00140.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00141.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00142.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00143.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00144.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00145.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00146.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00147.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00148.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00149.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00150.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00151.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00152.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00153.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00154.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00155.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00156.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00157.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00158.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00159.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00160.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00161.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00162.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00163.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00164.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00165.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00166.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00167.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00168.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00169.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00170.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00171.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00172.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00173.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00174.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00175.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00176.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00177.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00178.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00179.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00180.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00181.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00182.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00183.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00184.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00185.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00186.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00187.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00188.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00189.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00190.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00191.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00192.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00193.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00194.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00195.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00196.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00197.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00198.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00199.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00200.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00201.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00202.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00203.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00204.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00205.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00206.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00207.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00208.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00209.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00210.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00211.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00212.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00213.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00214.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00215.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00216.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00217.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00218.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00219.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00221.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00222.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00223.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00224.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00225.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00226.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00227.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00228.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00229.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00230.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00231.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00232.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00233.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00234.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00235.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00236.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00237.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00238.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00239.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00240.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00241.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00242.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00243.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00244.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00245.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00246.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00247.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00248.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00249.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00250.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00251.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00252.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00253.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00254.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00255.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00256.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00257.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00258.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00259.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00260.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00261.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00262.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00263.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00264.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00265.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00266.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00267.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00268.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00269.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00270.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00271.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00272.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00273.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00274.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00275.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00276.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00277.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00278.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00279.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00280.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00281.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00282.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00283.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00284.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00285.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00286.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00287.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00288.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00289.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00290.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00291.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00292.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00293.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00294.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00295.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00296.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00297.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00298.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00299.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00300.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00301.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00302.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00303.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00304.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00305.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00306.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00307.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00308.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00309.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00310.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00311.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00312.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00313.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00314.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00315.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00316.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00317.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00318.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00319.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00320.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00321.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00322.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00323.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00324.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00325.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00326.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00327.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00328.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00329.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00330.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00331.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00332.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00333.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00334.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00335.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00336.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00337.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00338.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00339.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00340.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00341.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00342.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00343.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00344.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00345.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00346.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00347.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00348.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00349.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00350.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00351.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00352.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00353.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00354.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00355.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00356.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00357.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00358.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00359.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00360.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00361.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00362.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00363.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00364.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00365.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00366.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00367.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00368.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00369.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00370.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00371.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00372.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00373.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00374.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00375.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00376.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00377.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00378.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00379.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00380.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00381.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00382.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00383.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00384.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00385.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00386.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00387.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00388.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00389.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00390.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00391.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00392.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00393.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00394.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00395.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00396.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00397.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00398.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00399.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00400.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00401.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00402.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00403.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00404.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00405.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00406.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00407.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00408.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00409.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00410.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00411.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00412.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00413.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00414.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00415.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00416.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00417.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00418.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00419.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00420.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00421.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00422.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00423.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00424.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00425.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00426.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00428.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00429.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00430.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00431.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00432.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00433.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00434.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00435.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00436.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00437.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00438.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00439.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00440.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00441.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00442.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00443.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00444.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00445.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00446.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00447.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00448.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00449.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00450.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00451.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00452.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00453.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00454.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00455.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00456.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00457.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00458.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00459.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00460.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00461.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00462.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00463.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00464.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00465.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00466.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00467.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00468.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00469.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00470.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00471.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00472.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00474.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00475.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00476.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00477.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00478.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00479.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00480.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00481.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00482.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00483.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00484.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00485.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00486.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00487.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00488.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00489.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00490.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00491.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00492.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00493.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00494.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00495.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00496.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00497.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00498.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00499.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00500.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00501.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00502.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00503.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00504.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00505.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00506.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00507.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00508.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00509.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00510.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00511.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00512.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00513.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00514.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00515.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00516.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00517.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00518.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00519.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00520.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00521.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00522.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00523.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00524.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00525.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00526.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00527.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00528.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00529.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00530.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00531.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00532.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00533.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00534.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00535.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00536.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00537.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00538.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00539.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00540.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00541.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00542.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00543.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00544.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00545.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00546.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00547.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00548.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00549.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00550.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00551.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00552.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00553.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00554.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00555.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00556.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00557.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00558.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00559.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00560.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00561.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00562.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00563.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00564.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00565.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00566.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00567.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00568.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00569.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00570.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00571.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00572.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00573.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00574.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00575.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00576.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00577.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00578.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00579.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00580.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00582.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00583.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00584.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00585.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00586.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00587.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00588.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00589.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00590.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00591.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00592.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00593.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00594.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00595.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00596.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00597.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00598.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00599.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00600.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00601.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00602.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00603.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00604.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00605.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00606.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00607.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00608.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00609.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00610.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00611.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00612.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00613.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00614.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00615.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00616.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00617.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00618.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00619.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00620.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00621.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00622.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00623.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00624.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00625.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00626.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00627.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00628.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00629.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00630.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00631.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00632.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00633.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00634.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00635.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00636.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00637.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00638.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00639.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00640.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00641.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00642.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00643.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00644.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00645.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00646.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00647.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00648.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00649.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00650.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00651.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00653.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00654.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00655.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00656.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00657.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00658.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00659.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00660.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00661.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00662.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00663.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00664.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00665.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00666.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00667.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00668.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00669.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00670.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00671.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00672.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00673.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00674.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00675.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00676.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00677.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00678.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00679.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00680.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00681.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00682.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00683.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00685.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00686.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00687.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00688.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00689.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00690.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00691.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00692.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00693.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00694.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00695.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00696.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00697.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00698.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00699.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00700.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00701.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00702.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00703.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00704.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00705.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00706.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00707.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00708.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00709.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00710.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00711.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00712.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00713.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00714.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00715.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00716.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00717.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00718.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00719.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00720.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00721.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00722.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00723.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00724.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00725.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00726.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00727.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00728.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00729.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00730.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00731.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00732.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00733.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00734.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00735.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00736.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00737.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00738.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00739.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00740.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00741.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00742.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00743.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00744.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00745.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00746.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00747.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00748.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00749.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00750.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00751.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00752.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00753.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00754.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00755.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00756.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00757.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00758.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00759.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00760.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00761.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00762.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00763.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00764.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00765.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00766.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00767.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00768.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00769.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00770.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00771.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00772.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00773.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00774.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00775.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00776.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00777.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00778.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00779.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00780.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00781.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00782.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00783.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00784.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00785.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00786.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00787.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00788.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00789.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00790.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00791.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00792.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00793.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00794.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00795.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00796.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00797.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00798.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00799.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00800.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00801.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00802.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00803.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00804.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00805.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00806.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00807.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00808.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00809.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00810.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00811.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00812.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00813.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00814.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00815.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00816.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00817.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00818.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00819.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00820.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00821.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00822.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00823.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00824.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00825.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00826.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00828.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00829.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00830.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00831.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00832.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00833.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00834.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00835.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00836.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00837.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00838.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00839.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00840.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00841.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00842.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00843.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00844.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00845.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00846.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00847.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00848.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00849.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00850.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00851.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00852.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00853.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00854.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00855.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00856.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00857.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00858.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00859.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00860.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00861.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00862.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00863.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00864.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00865.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00866.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00867.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00868.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00869.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00870.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00871.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00872.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00873.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00874.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00875.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00876.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00877.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00878.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00879.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00880.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00881.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00882.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00883.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00884.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00885.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00886.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00887.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00888.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00889.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00890.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00891.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00892.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00893.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00894.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00895.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00896.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00897.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00898.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00899.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00900.wav filter=lfs diff=lfs merge=lfs -text
+ wav/wav_1/00901.wav filter=lfs diff=lfs merge=lfs -text
933
+ wav/wav_1/00902.wav filter=lfs diff=lfs merge=lfs -text
934
+ wav/wav_1/00903.wav filter=lfs diff=lfs merge=lfs -text
935
+ wav/wav_1/00904.wav filter=lfs diff=lfs merge=lfs -text
936
+ wav/wav_1/00905.wav filter=lfs diff=lfs merge=lfs -text
937
+ wav/wav_1/00906.wav filter=lfs diff=lfs merge=lfs -text
938
+ wav/wav_1/00907.wav filter=lfs diff=lfs merge=lfs -text
939
+ wav/wav_1/00908.wav filter=lfs diff=lfs merge=lfs -text
940
+ wav/wav_1/00909.wav filter=lfs diff=lfs merge=lfs -text
941
+ wav/wav_1/00910.wav filter=lfs diff=lfs merge=lfs -text
942
+ wav/wav_1/00911.wav filter=lfs diff=lfs merge=lfs -text
943
+ wav/wav_1/00912.wav filter=lfs diff=lfs merge=lfs -text
944
+ wav/wav_1/00913.wav filter=lfs diff=lfs merge=lfs -text
945
+ wav/wav_1/00914.wav filter=lfs diff=lfs merge=lfs -text
946
+ wav/wav_1/00915.wav filter=lfs diff=lfs merge=lfs -text
947
+ wav/wav_1/00916.wav filter=lfs diff=lfs merge=lfs -text
948
+ wav/wav_1/00917.wav filter=lfs diff=lfs merge=lfs -text
949
+ wav/wav_1/00918.wav filter=lfs diff=lfs merge=lfs -text
950
+ wav/wav_1/00919.wav filter=lfs diff=lfs merge=lfs -text
951
+ wav/wav_1/00920.wav filter=lfs diff=lfs merge=lfs -text
952
+ wav/wav_1/00921.wav filter=lfs diff=lfs merge=lfs -text
953
+ wav/wav_1/00922.wav filter=lfs diff=lfs merge=lfs -text
954
+ wav/wav_1/00923.wav filter=lfs diff=lfs merge=lfs -text
955
+ wav/wav_1/00924.wav filter=lfs diff=lfs merge=lfs -text
956
+ wav/wav_1/00925.wav filter=lfs diff=lfs merge=lfs -text
957
+ wav/wav_1/00926.wav filter=lfs diff=lfs merge=lfs -text
958
+ wav/wav_1/00927.wav filter=lfs diff=lfs merge=lfs -text
959
+ wav/wav_1/00928.wav filter=lfs diff=lfs merge=lfs -text
960
+ wav/wav_1/00929.wav filter=lfs diff=lfs merge=lfs -text
961
+ wav/wav_1/00930.wav filter=lfs diff=lfs merge=lfs -text
962
+ wav/wav_1/00931.wav filter=lfs diff=lfs merge=lfs -text
963
+ wav/wav_1/00932.wav filter=lfs diff=lfs merge=lfs -text
964
+ wav/wav_1/00933.wav filter=lfs diff=lfs merge=lfs -text
965
+ wav/wav_1/00934.wav filter=lfs diff=lfs merge=lfs -text
966
+ wav/wav_1/00935.wav filter=lfs diff=lfs merge=lfs -text
967
+ wav/wav_1/00936.wav filter=lfs diff=lfs merge=lfs -text
968
+ wav/wav_1/00937.wav filter=lfs diff=lfs merge=lfs -text
969
+ wav/wav_1/00938.wav filter=lfs diff=lfs merge=lfs -text
970
+ wav/wav_1/00939.wav filter=lfs diff=lfs merge=lfs -text
971
+ wav/wav_1/00940.wav filter=lfs diff=lfs merge=lfs -text
972
+ wav/wav_1/00941.wav filter=lfs diff=lfs merge=lfs -text
973
+ wav/wav_1/00942.wav filter=lfs diff=lfs merge=lfs -text
974
+ wav/wav_1/00943.wav filter=lfs diff=lfs merge=lfs -text
975
+ wav/wav_1/00944.wav filter=lfs diff=lfs merge=lfs -text
976
+ wav/wav_1/00945.wav filter=lfs diff=lfs merge=lfs -text
977
+ wav/wav_1/00946.wav filter=lfs diff=lfs merge=lfs -text
978
+ wav/wav_1/00947.wav filter=lfs diff=lfs merge=lfs -text
979
+ wav/wav_1/00948.wav filter=lfs diff=lfs merge=lfs -text
980
+ wav/wav_1/00949.wav filter=lfs diff=lfs merge=lfs -text
981
+ wav/wav_1/00950.wav filter=lfs diff=lfs merge=lfs -text
982
+ wav/wav_1/00951.wav filter=lfs diff=lfs merge=lfs -text
983
+ wav/wav_1/00952.wav filter=lfs diff=lfs merge=lfs -text
984
+ wav/wav_1/00953.wav filter=lfs diff=lfs merge=lfs -text
985
+ wav/wav_1/00954.wav filter=lfs diff=lfs merge=lfs -text
986
+ wav/wav_1/00955.wav filter=lfs diff=lfs merge=lfs -text
987
+ wav/wav_1/00956.wav filter=lfs diff=lfs merge=lfs -text
988
+ wav/wav_1/00957.wav filter=lfs diff=lfs merge=lfs -text
989
+ wav/wav_1/00958.wav filter=lfs diff=lfs merge=lfs -text
990
+ wav/wav_1/00959.wav filter=lfs diff=lfs merge=lfs -text
991
+ wav/wav_1/00960.wav filter=lfs diff=lfs merge=lfs -text
992
+ wav/wav_1/00961.wav filter=lfs diff=lfs merge=lfs -text
993
+ wav/wav_1/00962.wav filter=lfs diff=lfs merge=lfs -text
994
+ wav/wav_1/00963.wav filter=lfs diff=lfs merge=lfs -text
995
+ wav/wav_1/00964.wav filter=lfs diff=lfs merge=lfs -text
996
+ wav/wav_1/00965.wav filter=lfs diff=lfs merge=lfs -text
997
+ wav/wav_1/00966.wav filter=lfs diff=lfs merge=lfs -text
998
+ wav/wav_1/00967.wav filter=lfs diff=lfs merge=lfs -text
999
+ wav/wav_1/00968.wav filter=lfs diff=lfs merge=lfs -text
1000
+ wav/wav_1/00969.wav filter=lfs diff=lfs merge=lfs -text
1001
+ wav/wav_1/00970.wav filter=lfs diff=lfs merge=lfs -text
1002
+ wav/wav_1/00971.wav filter=lfs diff=lfs merge=lfs -text
1003
+ wav/wav_1/00972.wav filter=lfs diff=lfs merge=lfs -text
1004
+ wav/wav_1/00973.wav filter=lfs diff=lfs merge=lfs -text
1005
+ wav/wav_1/00974.wav filter=lfs diff=lfs merge=lfs -text
1006
+ wav/wav_1/00975.wav filter=lfs diff=lfs merge=lfs -text
1007
+ wav/wav_1/00976.wav filter=lfs diff=lfs merge=lfs -text
1008
+ wav/wav_1/00977.wav filter=lfs diff=lfs merge=lfs -text
1009
+ wav/wav_1/00978.wav filter=lfs diff=lfs merge=lfs -text
1010
+ wav/wav_1/00979.wav filter=lfs diff=lfs merge=lfs -text
1011
+ wav/wav_1/00980.wav filter=lfs diff=lfs merge=lfs -text
1012
+ wav/wav_1/00981.wav filter=lfs diff=lfs merge=lfs -text
1013
+ wav/wav_1/00982.wav filter=lfs diff=lfs merge=lfs -text
1014
+ wav/wav_1/00983.wav filter=lfs diff=lfs merge=lfs -text
1015
+ wav/wav_1/00984.wav filter=lfs diff=lfs merge=lfs -text
1016
+ wav/wav_1/00985.wav filter=lfs diff=lfs merge=lfs -text
1017
+ wav/wav_1/00986.wav filter=lfs diff=lfs merge=lfs -text
1018
+ wav/wav_1/00987.wav filter=lfs diff=lfs merge=lfs -text
1019
+ wav/wav_1/00988.wav filter=lfs diff=lfs merge=lfs -text
1020
+ wav/wav_1/00989.wav filter=lfs diff=lfs merge=lfs -text
1021
+ wav/wav_1/00990.wav filter=lfs diff=lfs merge=lfs -text
1022
+ wav/wav_1/00991.wav filter=lfs diff=lfs merge=lfs -text
1023
+ wav/wav_1/00992.wav filter=lfs diff=lfs merge=lfs -text
1024
+ wav/wav_1/00993.wav filter=lfs diff=lfs merge=lfs -text
1025
+ wav/wav_1/00994.wav filter=lfs diff=lfs merge=lfs -text
1026
+ wav/wav_1/00995.wav filter=lfs diff=lfs merge=lfs -text
1027
+ wav/wav_1/00996.wav filter=lfs diff=lfs merge=lfs -text
1028
+ wav/wav_1/00997.wav filter=lfs diff=lfs merge=lfs -text
1029
+ wav/wav_1/00998.wav filter=lfs diff=lfs merge=lfs -text
1030
+ wav/wav_1/00999.wav filter=lfs diff=lfs merge=lfs -text
1031
+ wav/wav_1/01000.wav filter=lfs diff=lfs merge=lfs -text
1032
+ wav/wav_1/output.wav filter=lfs diff=lfs merge=lfs -text
Attention.py ADDED
@@ -0,0 +1,325 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Written by Shigeki Karita, 2019
2
+ # Published under Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
3
+ # Adapted by Florian Lux, 2021
4
+
5
+ """Multi-Head Attention layer definition."""
6
+
7
+ import math
8
+
9
+ import numpy
10
+ import torch
11
+ from torch import nn
12
+
13
+
14
+ from utils import make_non_pad_mask
15
+
16
+
17
+ class MultiHeadedAttention(nn.Module):
18
+ """
19
+ Multi-Head Attention layer.
20
+
21
+ Args:
22
+ n_head (int): The number of heads.
23
+ n_feat (int): The number of features.
24
+ dropout_rate (float): Dropout rate.
25
+ """
26
+
27
+ def __init__(self, n_head, n_feat, dropout_rate):
28
+ """
29
+ Construct an MultiHeadedAttention object.
30
+ """
31
+ super(MultiHeadedAttention, self).__init__()
32
+ assert n_feat % n_head == 0
33
+ # We assume d_v always equals d_k
34
+ self.d_k = n_feat // n_head
35
+ self.h = n_head
36
+ self.linear_q = nn.Linear(n_feat, n_feat)
37
+ self.linear_k = nn.Linear(n_feat, n_feat)
38
+ self.linear_v = nn.Linear(n_feat, n_feat)
39
+ self.linear_out = nn.Linear(n_feat, n_feat)
40
+ self.attn = None
41
+ self.dropout = nn.Dropout(p=dropout_rate)
42
+
43
+ def forward_qkv(self, query, key, value):
44
+ """
45
+ Transform query, key and value.
46
+
47
+ Args:
48
+ query (torch.Tensor): Query tensor (#batch, time1, size).
49
+ key (torch.Tensor): Key tensor (#batch, time2, size).
50
+ value (torch.Tensor): Value tensor (#batch, time2, size).
51
+
52
+ Returns:
53
+ torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
54
+ torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
55
+ torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
56
+ """
57
+ n_batch = query.size(0)
58
+ q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
59
+ k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
60
+ v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
61
+ q = q.transpose(1, 2) # (batch, head, time1, d_k)
62
+ k = k.transpose(1, 2) # (batch, head, time2, d_k)
63
+ v = v.transpose(1, 2) # (batch, head, time2, d_k)
64
+
65
+ return q, k, v
66
+
67
+ def forward_attention(self, value, scores, mask):
68
+ """
69
+ Compute attention context vector.
70
+
71
+ Args:
72
+ value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
73
+ scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
74
+ mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).
75
+
76
+ Returns:
77
+ torch.Tensor: Transformed value (#batch, time1, d_model)
78
+ weighted by the attention score (#batch, time1, time2).
79
+ """
80
+ n_batch = value.size(0)
81
+ if mask is not None:
82
+ mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
83
+ min_value = float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
84
+ scores = scores.masked_fill(mask, min_value)
85
+ self.attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) # (batch, head, time1, time2)
86
+ else:
87
+ self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
88
+
89
+ p_attn = self.dropout(self.attn)
90
+ x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
91
+ x = (x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)) # (batch, time1, d_model)
92
+
93
+ return self.linear_out(x) # (batch, time1, d_model)
94
+
95
+ def forward(self, query, key, value, mask):
96
+ """
97
+ Compute scaled dot product attention.
98
+
99
+ Args:
100
+ query (torch.Tensor): Query tensor (#batch, time1, size).
101
+ key (torch.Tensor): Key tensor (#batch, time2, size).
102
+ value (torch.Tensor): Value tensor (#batch, time2, size).
103
+ mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
104
+ (#batch, time1, time2).
105
+
106
+ Returns:
107
+ torch.Tensor: Output tensor (#batch, time1, d_model).
108
+ """
109
+ q, k, v = self.forward_qkv(query, key, value)
110
+ scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
111
+ return self.forward_attention(v, scores, mask)
112
+
113
+
114
+ class RelPositionMultiHeadedAttention(MultiHeadedAttention):
115
+ """
116
+ Multi-Head Attention layer with relative position encoding.
117
+ Details can be found in https://github.com/espnet/espnet/pull/2816.
118
+ Paper: https://arxiv.org/abs/1901.02860
119
+ Args:
120
+ n_head (int): The number of heads.
121
+ n_feat (int): The number of features.
122
+ dropout_rate (float): Dropout rate.
123
+ zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
124
+ """
125
+
126
+ def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):
127
+ """Construct an RelPositionMultiHeadedAttention object."""
128
+ super().__init__(n_head, n_feat, dropout_rate)
129
+ self.zero_triu = zero_triu
130
+ # linear transformation for positional encoding
131
+ self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
132
+ # these two learnable bias are used in matrix c and matrix d
133
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
134
+ self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
135
+ self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
136
+ torch.nn.init.xavier_uniform_(self.pos_bias_u)
137
+ torch.nn.init.xavier_uniform_(self.pos_bias_v)
138
+
139
+ def rel_shift(self, x):
140
+ """
141
+ Compute relative positional encoding.
142
+ Args:
143
+ x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).
144
+ time1 means the length of query vector.
145
+ Returns:
146
+ torch.Tensor: Output tensor.
147
+ """
148
+ zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
149
+ x_padded = torch.cat([zero_pad, x], dim=-1)
150
+
151
+ x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
152
+ x = x_padded[:, :, 1:].view_as(x)[:, :, :, : x.size(-1) // 2 + 1] # only keep the positions from 0 to time2
153
+
154
+ if self.zero_triu:
155
+ ones = torch.ones((x.size(2), x.size(3)), device=x.device)
156
+ x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
157
+
158
+ return x
159
+
160
+ def forward(self, query, key, value, pos_emb, mask):
161
+ """
162
+ Compute 'Scaled Dot Product Attention' with rel. positional encoding.
163
+ Args:
164
+ query (torch.Tensor): Query tensor (#batch, time1, size).
165
+ key (torch.Tensor): Key tensor (#batch, time2, size).
166
+ value (torch.Tensor): Value tensor (#batch, time2, size).
167
+ pos_emb (torch.Tensor): Positional embedding tensor
168
+ (#batch, 2*time1-1, size).
169
+ mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
170
+ (#batch, time1, time2).
171
+ Returns:
172
+ torch.Tensor: Output tensor (#batch, time1, d_model).
173
+ """
174
+ q, k, v = self.forward_qkv(query, key, value)
175
+ q = q.transpose(1, 2) # (batch, time1, head, d_k)
176
+
177
+ n_batch_pos = pos_emb.size(0)
178
+ p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
179
+ p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)
180
+
181
+ # (batch, head, time1, d_k)
182
+ q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
183
+ # (batch, head, time1, d_k)
184
+ q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
185
+
186
+ # compute attention score
187
+ # first compute matrix a and matrix c
188
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
189
+ # (batch, head, time1, time2)
190
+ matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
191
+
192
+ # compute matrix b and matrix d
193
+ # (batch, head, time1, 2*time1-1)
194
+ matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
195
+ matrix_bd = self.rel_shift(matrix_bd)
196
+
197
+ scores = (matrix_ac + matrix_bd) / math.sqrt(self.d_k) # (batch, head, time1, time2)
198
+
199
+ return self.forward_attention(v, scores, mask)
200
+
201
+
202
+ class GuidedAttentionLoss(torch.nn.Module):
203
+ """
204
+ Guided attention loss function module.
205
+
206
+ This module calculates the guided attention loss described
207
+ in `Efficiently Trainable Text-to-Speech System Based
208
+ on Deep Convolutional Networks with Guided Attention`_,
209
+ which forces the attention to be diagonal.
210
+
211
+ .. _`Efficiently Trainable Text-to-Speech System
212
+ Based on Deep Convolutional Networks with Guided Attention`:
213
+ https://arxiv.org/abs/1710.08969
214
+ """
215
+
216
+ def __init__(self, sigma=0.4, alpha=1.0):
217
+ """
218
+ Initialize guided attention loss module.
219
+
220
+ Args:
221
+ sigma (float, optional): Standard deviation to control
222
+ how close attention to a diagonal.
223
+ alpha (float, optional): Scaling coefficient (lambda).
224
+ reset_always (bool, optional): Whether to always reset masks.
225
+ """
226
+ super(GuidedAttentionLoss, self).__init__()
227
+ self.sigma = sigma
228
+ self.alpha = alpha
229
+ self.guided_attn_masks = None
230
+ self.masks = None
231
+
232
+ def _reset_masks(self):
233
+ self.guided_attn_masks = None
234
+ self.masks = None
235
+
236
+ def forward(self, att_ws, ilens, olens):
237
+ """
238
+ Calculate forward propagation.
239
+
240
+ Args:
241
+ att_ws (Tensor): Batch of attention weights (B, T_max_out, T_max_in).
242
+ ilens (LongTensor): Batch of input lenghts (B,).
243
+ olens (LongTensor): Batch of output lenghts (B,).
244
+
245
+ Returns:
246
+ Tensor: Guided attention loss value.
247
+ """
248
+ self._reset_masks()
249
+ self.guided_attn_masks = self._make_guided_attention_masks(ilens, olens).to(att_ws.device)
250
+ self.masks = self._make_masks(ilens, olens).to(att_ws.device)
251
+ losses = self.guided_attn_masks * att_ws
252
+ loss = torch.mean(losses.masked_select(self.masks))
253
+ self._reset_masks()
254
+ return self.alpha * loss
255
+
256
+ def _make_guided_attention_masks(self, ilens, olens):
257
+ n_batches = len(ilens)
258
+ max_ilen = max(ilens)
259
+ max_olen = max(olens)
260
+ guided_attn_masks = torch.zeros((n_batches, max_olen, max_ilen), device=ilens.device)
261
+ for idx, (ilen, olen) in enumerate(zip(ilens, olens)):
262
+ guided_attn_masks[idx, :olen, :ilen] = self._make_guided_attention_mask(ilen, olen, self.sigma)
263
+ return guided_attn_masks
264
+
265
+ @staticmethod
266
+ def _make_guided_attention_mask(ilen, olen, sigma):
267
+ """
268
+ Make guided attention mask.
269
+ """
270
+ grid_x, grid_y = torch.meshgrid(torch.arange(olen, device=olen.device).float(), torch.arange(ilen, device=ilen.device).float())
271
+ return 1.0 - torch.exp(-((grid_y / ilen - grid_x / olen) ** 2) / (2 * (sigma ** 2)))
272
+
273
+ @staticmethod
274
+ def _make_masks(ilens, olens):
275
+ """
276
+ Make masks indicating non-padded part.
277
+
278
+ Args:
279
+ ilens (LongTensor or List): Batch of lengths (B,).
280
+ olens (LongTensor or List): Batch of lengths (B,).
281
+
282
+ Returns:
283
+ Tensor: Mask tensor indicating non-padded part.
284
+ dtype=torch.uint8 in PyTorch 1.2-
285
+ dtype=torch.bool in PyTorch 1.2+ (including 1.2)
286
+ """
287
+ in_masks = make_non_pad_mask(ilens, device=ilens.device) # (B, T_in)
288
+ out_masks = make_non_pad_mask(olens, device=olens.device) # (B, T_out)
289
+ return out_masks.unsqueeze(-1) & in_masks.unsqueeze(-2) # (B, T_out, T_in)
290
+
291
+
292
+ class GuidedMultiHeadAttentionLoss(GuidedAttentionLoss):
293
+ """
294
+ Guided attention loss function module for multi head attention.
295
+
296
+ Args:
297
+ sigma (float, optional): Standard deviation to control
298
+ how close attention to a diagonal.
299
+ alpha (float, optional): Scaling coefficient (lambda).
300
+ reset_always (bool, optional): Whether to always reset masks.
301
+ """
302
+
303
+ def forward(self, att_ws, ilens, olens):
304
+ """
305
+ Calculate forward propagation.
306
+
307
+ Args:
308
+ att_ws (Tensor):
309
+ Batch of multi head attention weights (B, H, T_max_out, T_max_in).
310
+ ilens (LongTensor): Batch of input lenghts (B,).
311
+ olens (LongTensor): Batch of output lenghts (B,).
312
+
313
+ Returns:
314
+ Tensor: Guided attention loss value.
315
+ """
316
+ if self.guided_attn_masks is None:
317
+ self.guided_attn_masks = (self._make_guided_attention_masks(ilens, olens).to(att_ws.device).unsqueeze(1))
318
+ if self.masks is None:
319
+ self.masks = self._make_masks(ilens, olens).to(att_ws.device).unsqueeze(1)
320
+ losses = self.guided_attn_masks * att_ws
321
+ loss = torch.mean(losses.masked_select(self.masks))
322
+ if self.reset_always:
323
+ self._reset_masks()
324
+
325
+ return self.alpha * loss
__pycache__/Attention.cpython-310.pyc ADDED
Binary file (11.7 kB). View file
 
__pycache__/attentions.cpython-310.pyc ADDED
Binary file (9.57 kB). View file
 
__pycache__/commons.cpython-310.pyc ADDED
Binary file (5.75 kB). View file
 
__pycache__/data_utils.cpython-310.pyc ADDED
Binary file (16.8 kB). View file
 
__pycache__/mel_processing.cpython-310.pyc ADDED
Binary file (3.32 kB). View file
 
__pycache__/models_mel_style.cpython-310.pyc ADDED
Binary file (25.5 kB). View file
 
__pycache__/modules.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
__pycache__/transforms.cpython-310.pyc ADDED
Binary file (3.89 kB). View file
 
__pycache__/utils.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
app_gradio.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import json
4
+ import math
5
+ import torch
6
+ from torch import nn
7
+ from torch.nn import functional as F
8
+ import librosa
9
+ import argparse
10
+ import librosa.display
11
+ import matplotlib.pyplot as plt
12
+
13
+
14
+ import commons
15
+ import utils
16
+ from data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate
17
+ #from model_old_mel_style import SynthesizerTrn
18
+ from models_mel_style import SynthesizerTrn
19
+ from text.symbols import symbols
20
+ from text import text_to_sequence
21
+ from mel_processing import spectrogram_torch, spec_to_mel_torch
22
+ from scipy.io.wavfile import write
23
+
24
+ # Thư mục chứa các file wav
25
+ AUDIO_DIR = "wav/wav_1"
26
+
27
+ def list_wav_files():
28
+ return [f for f in os.listdir(AUDIO_DIR) if f.endswith(".wav")]
29
+
30
+ # Trả về đường dẫn file wav được chọn
31
+ def get_audio_file(file_name):
32
+ file_path = os.path.join(AUDIO_DIR, file_name)
33
+ return file_path
34
+
35
+ def get_text(text, hps):
36
+ text_norm = text_to_sequence(text, hps.data.text_cleaners)
37
+ if hps.data.add_blank:
38
+ text_norm = commons.intersperse(text_norm, 0)
39
+ text_norm = torch.LongTensor(text_norm)
40
+ return text_norm
41
+
42
+ # Tạo giọng nói bằng mô hình
43
+ def generate_voice(prompt_text, ref_audio_filename):
44
+ import argparse
45
+ class Args:
46
+ checkpoint_path = "logs/large_audio/G_504000.pth"
47
+ config = "configs/vn_base.json"
48
+ save_path = "infer_result/"
49
+ ref_audio = os.path.join("wav/wav_1", ref_audio_filename)
50
+ text = prompt_text
51
+ args = Args()
52
+
53
+ hps = utils.get_hparams_from_file(args.config)
54
+ net_g = SynthesizerTrn(
55
+ len(symbols),
56
+ hps.data.filter_length // 2 + 1,
57
+ hps.train.segment_size // hps.data.hop_length,
58
+ n_speakers=0,
59
+ **hps.model
60
+ )
61
+ _ = net_g.eval()
62
+ _ = utils.load_checkpoint(args.checkpoint_path, net_g, None)
63
+
64
+ audio, _ = librosa.load(args.ref_audio, sr=hps.data.sampling_rate)
65
+ audio = torch.from_numpy(audio).unsqueeze(0)
66
+ spec = spectrogram_torch(audio, hps.data.filter_length, hps.data.sampling_rate,
67
+ hps.data.hop_length, hps.data.win_length, center=False)
68
+ spec = torch.squeeze(spec, 0)
69
+ mel = spec_to_mel_torch(spec, hps.data.filter_length, hps.data.n_mel_channels,
70
+ hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax)
71
+
72
+ stn_tst = get_text(args.text, hps)
73
+ x_tst = stn_tst.unsqueeze(0)
74
+ x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
75
+ sid = torch.LongTensor([4])
76
+
77
+ with torch.no_grad():
78
+ audio_gen = net_g.infer(x_tst, x_tst_lengths, mel.unsqueeze(0),
79
+ sid=None, noise_scale=0.1,
80
+ noise_scale_w=0.1, length_scale=1.1)[0][0, 0].data.cpu().float().numpy()
81
+
82
+ os.makedirs(args.save_path, exist_ok=True)
83
+ output_file = os.path.join(args.save_path, f'test_{str(len(os.listdir(args.save_path)))}.wav')
84
+ write(output_file, hps.data.sampling_rate, audio_gen)
85
+
86
+ return output_file
87
+
88
+ # with gr.Blocks() as demo:
89
+ # gr.Markdown("<center># <h1>Demo Model Text to Speech</h1></center>")
90
+
91
+ # prompt = gr.Textbox(label="Prompt", placeholder="Type somethrgs.ing here...")
92
+
93
+ # wav_files = sorted(list_wav_files())
94
+ # if not wav_files:
95
+ # gr.Markdown("⚠️ Không tìm thấy file .wav nào trong thư mục!")
96
+
97
+ # gr.Markdown("## 🎧 Chọn và nghe file âm thanh gốc")
98
+
99
+ # with gr.Row():
100
+ # file_dropdown = gr.Dropdown(choices=wav_files, label="Chọn file WAV")
101
+ # audio_output = gr.Audio(type="filepath", label="Nghe tại đây")
102
+
103
+ # file_dropdown.change(fn=get_audio_file, inputs=file_dropdown, outputs=audio_output)
104
+
105
+ # generate_button = gr.Button("Generate Voice")
106
+
107
+ # generated_audio_output = gr.Audio(type="filepath", label="🔊 Kết quả sinh giọng nói")
108
+ # generate_button.click(fn=generate_voice, inputs=[prompt, file_dropdown], outputs=generated_audio_output)
109
+
110
+ with gr.Blocks() as demo:
111
+ gr.Markdown("<center># <h1>Demo Model Text to Speech</h1></center>")
112
+
113
+ prompt = gr.Textbox(label="Prompt", placeholder="Type something here...")
114
+
115
+ gr.Markdown("## 🎧 Chọn hoặc ghi âm giọng nói tham chiếu")
116
+
117
+ with gr.Tab("📁 Chọn từ file"):
118
+ wav_files = sorted(list_wav_files())
119
+ file_dropdown = gr.Dropdown(choices=wav_files, label="Chọn file WAV có sẵn")
120
+ audio_output = gr.Audio(type="filepath", label="Nghe tại đây")
121
+ file_dropdown.change(fn=get_audio_file, inputs=file_dropdown, outputs=audio_output)
122
+
123
+ with gr.Tab("🎙️ Ghi âm mới"):
124
+ recorded_audio = gr.Audio(label="Ghi âm hoặc chọn file", type="filepath")
125
+
126
+ # Nút sinh giọng nói
127
+ generate_button = gr.Button("Generate Voice")
128
+ generated_audio_output = gr.Audio(type="filepath", label="🔊 Kết quả sinh giọng nói")
129
+
130
+ def process_inputs(prompt_text, file_choice, recorded_path):
131
+ # Nếu người dùng có file ghi âm -> lưu tạm và dùng
132
+ if recorded_path is not None:
133
+ filename = f"user_recording_{len(os.listdir(AUDIO_DIR))}.wav"
134
+ saved_path = os.path.join(AUDIO_DIR, filename)
135
+ os.rename(recorded_path, saved_path)
136
+ ref_file = filename
137
+ elif file_choice:
138
+ ref_file = file_choice
139
+ else:
140
+ raise gr.Error("Bạn cần chọn hoặc ghi âm một file giọng nói.")
141
+
142
+ return generate_voice(prompt_text, ref_file)
143
+
144
+ generate_button.click(
145
+ fn=process_inputs,
146
+ inputs=[prompt, file_dropdown, recorded_audio],
147
+ outputs=generated_audio_output
148
+ )
149
+
150
+ demo.launch()
attentions.py ADDED
@@ -0,0 +1,303 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import math
3
+ import numpy as np
4
+ import torch
5
+ from torch import nn
6
+ from torch.nn import functional as F
7
+
8
+ import commons
9
+ import modules
10
+ from modules import LayerNorm
11
+
12
+
13
+ class Encoder(nn.Module):
14
+ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
15
+ super().__init__()
16
+ self.hidden_channels = hidden_channels
17
+ self.filter_channels = filter_channels
18
+ self.n_heads = n_heads
19
+ self.n_layers = n_layers
20
+ self.kernel_size = kernel_size
21
+ self.p_dropout = p_dropout
22
+ self.window_size = window_size
23
+
24
+ self.drop = nn.Dropout(p_dropout)
25
+ self.attn_layers = nn.ModuleList()
26
+ self.norm_layers_1 = nn.ModuleList()
27
+ self.ffn_layers = nn.ModuleList()
28
+ self.norm_layers_2 = nn.ModuleList()
29
+ for i in range(self.n_layers):
30
+ self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
31
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
32
+ self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
33
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
34
+
35
+ def forward(self, x, x_mask):
36
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
37
+ x = x * x_mask
38
+ for i in range(self.n_layers):
39
+ y = self.attn_layers[i](x, x, attn_mask)
40
+ y = self.drop(y)
41
+ x = self.norm_layers_1[i](x + y)
42
+
43
+ y = self.ffn_layers[i](x, x_mask)
44
+ y = self.drop(y)
45
+ x = self.norm_layers_2[i](x + y)
46
+ x = x * x_mask
47
+ return x
48
+
49
+
50
+ class Decoder(nn.Module):
51
+ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
52
+ super().__init__()
53
+ self.hidden_channels = hidden_channels
54
+ self.filter_channels = filter_channels
55
+ self.n_heads = n_heads
56
+ self.n_layers = n_layers
57
+ self.kernel_size = kernel_size
58
+ self.p_dropout = p_dropout
59
+ self.proximal_bias = proximal_bias
60
+ self.proximal_init = proximal_init
61
+
62
+ self.drop = nn.Dropout(p_dropout)
63
+ self.self_attn_layers = nn.ModuleList()
64
+ self.norm_layers_0 = nn.ModuleList()
65
+ self.encdec_attn_layers = nn.ModuleList()
66
+ self.norm_layers_1 = nn.ModuleList()
67
+ self.ffn_layers = nn.ModuleList()
68
+ self.norm_layers_2 = nn.ModuleList()
69
+ for i in range(self.n_layers):
70
+ self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
71
+ self.norm_layers_0.append(LayerNorm(hidden_channels))
72
+ self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
73
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
74
+ self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
75
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
76
+
77
+ def forward(self, x, x_mask, h, h_mask):
78
+ """
79
+ x: decoder input
80
+ h: encoder output
81
+ """
82
+ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
83
+ encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
84
+ x = x * x_mask
85
+ for i in range(self.n_layers):
86
+ y = self.self_attn_layers[i](x, x, self_attn_mask)
87
+ y = self.drop(y)
88
+ x = self.norm_layers_0[i](x + y)
89
+
90
+ y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
91
+ y = self.drop(y)
92
+ x = self.norm_layers_1[i](x + y)
93
+
94
+ y = self.ffn_layers[i](x, x_mask)
95
+ y = self.drop(y)
96
+ x = self.norm_layers_2[i](x + y)
97
+ x = x * x_mask
98
+ return x
99
+
100
+
101
+ class MultiHeadAttention(nn.Module):
102
+ def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
103
+ super().__init__()
104
+ assert channels % n_heads == 0
105
+
106
+ self.channels = channels
107
+ self.out_channels = out_channels
108
+ self.n_heads = n_heads
109
+ self.p_dropout = p_dropout
110
+ self.window_size = window_size
111
+ self.heads_share = heads_share
112
+ self.block_length = block_length
113
+ self.proximal_bias = proximal_bias
114
+ self.proximal_init = proximal_init
115
+ self.attn = None
116
+
117
+ self.k_channels = channels // n_heads
118
+ self.conv_q = nn.Conv1d(channels, channels, 1)
119
+ self.conv_k = nn.Conv1d(channels, channels, 1)
120
+ self.conv_v = nn.Conv1d(channels, channels, 1)
121
+ self.conv_o = nn.Conv1d(channels, out_channels, 1)
122
+ self.drop = nn.Dropout(p_dropout)
123
+
124
+ if window_size is not None:
125
+ n_heads_rel = 1 if heads_share else n_heads
126
+ rel_stddev = self.k_channels**-0.5
127
+ self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
128
+ self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
129
+
130
+ nn.init.xavier_uniform_(self.conv_q.weight)
131
+ nn.init.xavier_uniform_(self.conv_k.weight)
132
+ nn.init.xavier_uniform_(self.conv_v.weight)
133
+ if proximal_init:
134
+ with torch.no_grad():
135
+ self.conv_k.weight.copy_(self.conv_q.weight)
136
+ self.conv_k.bias.copy_(self.conv_q.bias)
137
+
138
+ def forward(self, x, c, attn_mask=None):
139
+ q = self.conv_q(x)
140
+ k = self.conv_k(c)
141
+ v = self.conv_v(c)
142
+
143
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
144
+
145
+ x = self.conv_o(x)
146
+ return x
147
+
148
+ def attention(self, query, key, value, mask=None):
149
+ # reshape [b, d, t] -> [b, n_h, t, d_k]
150
+ b, d, t_s, t_t = (*key.size(), query.size(2))
151
+ query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
152
+ key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
153
+ value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
154
+
155
+ scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
156
+ if self.window_size is not None:
157
+ assert t_s == t_t, "Relative attention is only available for self-attention."
158
+ key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
159
+ rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
160
+ scores_local = self._relative_position_to_absolute_position(rel_logits)
161
+ scores = scores + scores_local
162
+ if self.proximal_bias:
163
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
164
+ scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
165
+ if mask is not None:
166
+ scores = scores.masked_fill(mask == 0, -1e4)
167
+ if self.block_length is not None:
168
+ assert t_s == t_t, "Local attention is only available for self-attention."
169
+ block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
170
+ scores = scores.masked_fill(block_mask == 0, -1e4)
171
+ p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
172
+ p_attn = self.drop(p_attn)
173
+ output = torch.matmul(p_attn, value)
174
+ if self.window_size is not None:
175
+ relative_weights = self._absolute_position_to_relative_position(p_attn)
176
+ value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
177
+ output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
178
+ output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
179
+ return output, p_attn
180
+
181
+ def _matmul_with_relative_values(self, x, y):
182
+ """
183
+ x: [b, h, l, m]
184
+ y: [h or 1, m, d]
185
+ ret: [b, h, l, d]
186
+ """
187
+ ret = torch.matmul(x, y.unsqueeze(0))
188
+ return ret
189
+
190
+ def _matmul_with_relative_keys(self, x, y):
191
+ """
192
+ x: [b, h, l, d]
193
+ y: [h or 1, m, d]
194
+ ret: [b, h, l, m]
195
+ """
196
+ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
197
+ return ret
198
+
199
+ def _get_relative_embeddings(self, relative_embeddings, length):
200
+ max_relative_position = 2 * self.window_size + 1
201
+ # Pad first before slice to avoid using cond ops.
202
+ pad_length = max(length - (self.window_size + 1), 0)
203
+ slice_start_position = max((self.window_size + 1) - length, 0)
204
+ slice_end_position = slice_start_position + 2 * length - 1
205
+ if pad_length > 0:
206
+ padded_relative_embeddings = F.pad(
207
+ relative_embeddings,
208
+ commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
209
+ else:
210
+ padded_relative_embeddings = relative_embeddings
211
+ used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
212
+ return used_relative_embeddings
213
+
214
+ def _relative_position_to_absolute_position(self, x):
215
+ """
216
+ x: [b, h, l, 2*l-1]
217
+ ret: [b, h, l, l]
218
+ """
219
+ batch, heads, length, _ = x.size()
220
+ # Concat columns of pad to shift from relative to absolute indexing.
221
+ x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
222
+
223
+ # Concat extra elements so to add up to shape (len+1, 2*len-1).
224
+ x_flat = x.view([batch, heads, length * 2 * length])
225
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
226
+
227
+ # Reshape and slice out the padded elements.
228
+ x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
229
+ return x_final
230
+
231
+ def _absolute_position_to_relative_position(self, x):
232
+ """
233
+ x: [b, h, l, l]
234
+ ret: [b, h, l, 2*l-1]
235
+ """
236
+ batch, heads, length, _ = x.size()
237
+ # padd along column
238
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
239
+ x_flat = x.view([batch, heads, length**2 + length*(length -1)])
240
+ # add 0's in the beginning that will skew the elements after reshape
241
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
242
+ x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
243
+ return x_final
244
+
245
+ def _attention_bias_proximal(self, length):
246
+ """Bias for self-attention to encourage attention to close positions.
247
+ Args:
248
+ length: an integer scalar.
249
+ Returns:
250
+ a Tensor with shape [1, 1, length, length]
251
+ """
252
+ r = torch.arange(length, dtype=torch.float32)
253
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
254
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
255
+
256
+
257
+ class FFN(nn.Module):
258
+ def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
259
+ super().__init__()
260
+ self.in_channels = in_channels
261
+ self.out_channels = out_channels
262
+ self.filter_channels = filter_channels
263
+ self.kernel_size = kernel_size
264
+ self.p_dropout = p_dropout
265
+ self.activation = activation
266
+ self.causal = causal
267
+
268
+ if causal:
269
+ self.padding = self._causal_padding
270
+ else:
271
+ self.padding = self._same_padding
272
+
273
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
274
+ self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
275
+ self.drop = nn.Dropout(p_dropout)
276
+
277
+ def forward(self, x, x_mask):
278
+ x = self.conv_1(self.padding(x * x_mask))
279
+ if self.activation == "gelu":
280
+ x = x * torch.sigmoid(1.702 * x)
281
+ else:
282
+ x = torch.relu(x)
283
+ x = self.drop(x)
284
+ x = self.conv_2(self.padding(x * x_mask))
285
+ return x * x_mask
286
+
287
+ def _causal_padding(self, x):
288
+ if self.kernel_size == 1:
289
+ return x
290
+ pad_l = self.kernel_size - 1
291
+ pad_r = 0
292
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
293
+ x = F.pad(x, commons.convert_pad_shape(padding))
294
+ return x
295
+
296
+ def _same_padding(self, x):
297
+ if self.kernel_size == 1:
298
+ return x
299
+ pad_l = (self.kernel_size - 1) // 2
300
+ pad_r = self.kernel_size // 2
301
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
302
+ x = F.pad(x, commons.convert_pad_shape(padding))
303
+ return x
commons.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import numpy as np
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+
7
+
8
+ def init_weights(m, mean=0.0, std=0.01):
9
+ classname = m.__class__.__name__
10
+ if classname.find("Conv") != -1:
11
+ m.weight.data.normal_(mean, std)
12
+
13
+
14
+ def get_padding(kernel_size, dilation=1):
15
+ return int((kernel_size*dilation - dilation)/2)
16
+
17
+
18
+ def convert_pad_shape(pad_shape):
19
+ l = pad_shape[::-1]
20
+ pad_shape = [item for sublist in l for item in sublist]
21
+ return pad_shape
22
+
23
+
24
+ def intersperse(lst, item):
25
+ result = [item] * (len(lst) * 2 + 1)
26
+ result[1::2] = lst
27
+ return result
28
+
29
+
30
+ def kl_divergence(m_p, logs_p, m_q, logs_q):
31
+ """KL(P||Q)"""
32
+ kl = (logs_q - logs_p) - 0.5
33
+ kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
34
+ return kl
35
+
36
+
37
+ def rand_gumbel(shape):
38
+ """Sample from the Gumbel distribution, protect from overflows."""
39
+ uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
40
+ return -torch.log(-torch.log(uniform_samples))
41
+
42
+
43
+ def rand_gumbel_like(x):
44
+ g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
45
+ return g
46
+
47
+
48
+ def slice_segments(x, ids_str, segment_size=4):
49
+ ret = torch.zeros_like(x[:, :, :segment_size])
50
+ for i in range(x.size(0)):
51
+ idx_str = ids_str[i]
52
+ idx_end = idx_str + segment_size
53
+ ret[i] = x[i, :, idx_str:idx_end]
54
+ return ret
55
+
56
+
57
+ def rand_slice_segments(x, x_lengths=None, segment_size=4):
58
+ b, d, t = x.size()
59
+ if x_lengths is None:
60
+ x_lengths = t
61
+ ids_str_max = x_lengths - segment_size + 1
62
+ ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
63
+ ret = slice_segments(x, ids_str, segment_size)
64
+ return ret, ids_str
65
+
66
+
67
+ def get_timing_signal_1d(
68
+ length, channels, min_timescale=1.0, max_timescale=1.0e4):
69
+ position = torch.arange(length, dtype=torch.float)
70
+ num_timescales = channels // 2
71
+ log_timescale_increment = (
72
+ math.log(float(max_timescale) / float(min_timescale)) /
73
+ (num_timescales - 1))
74
+ inv_timescales = min_timescale * torch.exp(
75
+ torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
76
+ scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
77
+ signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
78
+ signal = F.pad(signal, [0, 0, 0, channels % 2])
79
+ signal = signal.view(1, channels, length)
80
+ return signal
81
+
82
+
83
+ def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
84
+ b, channels, length = x.size()
85
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
86
+ return x + signal.to(dtype=x.dtype, device=x.device)
87
+
88
+
89
+ def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
90
+ b, channels, length = x.size()
91
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
92
+ return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
93
+
94
+
95
+ def subsequent_mask(length):
96
+ mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
97
+ return mask
98
+
99
+
100
+ @torch.jit.script
101
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
102
+ n_channels_int = n_channels[0]
103
+ in_act = input_a + input_b
104
+ t_act = torch.tanh(in_act[:, :n_channels_int, :])
105
+ s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
106
+ acts = t_act * s_act
107
+ return acts
108
+
109
+
110
+ def convert_pad_shape(pad_shape):
111
+ l = pad_shape[::-1]
112
+ pad_shape = [item for sublist in l for item in sublist]
113
+ return pad_shape
114
+
115
+
116
+ def shift_1d(x):
117
+ x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
118
+ return x
119
+
120
+
121
+ def sequence_mask(length, max_length=None):
122
+ if max_length is None:
123
+ max_length = length.max()
124
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
125
+ return x.unsqueeze(0) < length.unsqueeze(1)
126
+
127
+
128
+ def generate_path(duration, mask):
129
+ """
130
+ duration: [b, 1, t_x]
131
+ mask: [b, 1, t_y, t_x]
132
+ """
133
+ device = duration.device
134
+
135
+ b, _, t_y, t_x = mask.shape
136
+ cum_duration = torch.cumsum(duration, -1)
137
+
138
+ cum_duration_flat = cum_duration.view(b * t_x)
139
+ path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
140
+ path = path.view(b, t_x, t_y)
141
+ path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
142
+ path = path.unsqueeze(1).transpose(2,3) * mask
143
+ return path
144
+
145
+
146
+ def clip_grad_value_(parameters, clip_value, norm_type=2):
147
+ if isinstance(parameters, torch.Tensor):
148
+ parameters = [parameters]
149
+ parameters = list(filter(lambda p: p.grad is not None, parameters))
150
+ norm_type = float(norm_type)
151
+ if clip_value is not None:
152
+ clip_value = float(clip_value)
153
+
154
+ total_norm = 0
155
+ for p in parameters:
156
+ param_norm = p.grad.data.norm(norm_type)
157
+ total_norm += param_norm.item() ** norm_type
158
+ if clip_value is not None:
159
+ p.grad.data.clamp_(min=-clip_value, max=clip_value)
160
+ total_norm = total_norm ** (1. / norm_type)
161
+ return total_norm
configs/bert_1.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45576c123c04240965a6510d270c11f59f204d0eb4e4998f140ad240e13ef506
3
+ size 59383851
configs/bert_3.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95be25c9e054ae02ac0b3a6adc45816330175c2078b24627ac2358e55c506017
3
+ size 77086251
configs/bert_5.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56a93eb75f84de8780c57f9c8507730985d90ed810cc48dac8788721cdfc7645
3
+ size 77086251
configs/step_1000000.t7 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0714ff85804db43e06b3b0ac5749bf90cf206257c6c5916e8a98c5933b4c21e0
3
+ size 25185187
configs/vie_bert.yml ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ log_dir: "Checkpoint"
2
+ mixed_precision: "fp16"
3
+ data_folder: "pretrain/metadata/vie_preprocessed"
4
+ batch_size: 16
5
+ save_interval: 1
6
+ log_interval: 1
7
+ num_process: 1 # number of GPUs
8
+ num_steps: 1000000
9
+
10
+ dataset_params:
11
+ tokenizer: "vinai/phobert-base-v2"
12
+ token_separator: " " # token used for phoneme separator (space)
13
+ token_mask: "M" # token used for phoneme mask (M)
14
+ word_separator: 100000 # token used for word separator (<formula>)
15
+ token_maps: "token_maps.pkl" # token map path
16
+
17
+ max_mel_length: 512 # max phoneme length
18
+
19
+ word_mask_prob: 0.15 # probability to mask the entire word
20
+ phoneme_mask_prob: 0.1 # probability to mask each phoneme
21
+ replace_prob: 0.2 # probablity to replace phonemes
22
+
23
+ model_params:
24
+ vocab_size: 138
25
+ hidden_size: 768
26
+ num_attention_heads: 12
27
+ intermediate_size: 2048
28
+ max_position_embeddings: 512
29
+ num_hidden_layers: 12
30
+ dropout: 0.1
configs/vn_base.json ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 100,
4
+ "eval_interval": 1500,
5
+ "seed": 1234,
6
+ "epochs": 30000,
7
+ "learning_rate": 2e-5,
8
+ "betas": [0.8, 0.99],
9
+ "eps": 1e-9,
10
+ "batch_size": 5,
11
+ "fp16_run": false,
12
+ "lr_decay": 0.999875,
13
+ "segment_size": 8192,
14
+ "init_lr_ratio": 1,
15
+ "warmup_epochs": 0,
16
+ "c_mel": 45,
17
+ "c_kl": 1.0
18
+ },
19
+ "data": {
20
+ "training_files":"filelists/vn_vc_train.txt.cleaned",
21
+ "validation_files":"filelists/vn_vc_val.txt.cleaned",
22
+ "text_cleaners":["vietnamese_cleaner"],
23
+ "max_wav_value": 32768.0,
24
+ "sampling_rate": 16000,
25
+ "filter_length": 1024,
26
+ "hop_length": 256,
27
+ "win_length": 1024,
28
+ "n_mel_channels": 80,
29
+ "mel_fmin": 0.0,
30
+ "mel_fmax": null,
31
+ "add_blank": false,
32
+ "n_speakers": 0,
33
+ "cleaned_text": true
34
+ },
35
+ "model": {
36
+ "inter_channels": 192,
37
+ "hidden_channels": 192,
38
+ "filter_channels": 768,
39
+ "n_heads": 2,
40
+ "n_layers": 6,
41
+ "kernel_size": 3,
42
+ "p_dropout": 0.1,
43
+ "resblock": "1",
44
+ "resblock_kernel_sizes": [3,7,11],
45
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
46
+ "upsample_rates": [8,8,2,2],
47
+ "upsample_initial_channel": 512,
48
+ "upsample_kernel_sizes": [16,16,4,4],
49
+ "n_layers_q": 3,
50
+ "use_spectral_norm": false,
51
+ "gin_channels": 256}
52
+ }
data_utils.py ADDED
@@ -0,0 +1,634 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ import os
3
+ import random
4
+ import numpy as np
5
+ import torch
6
+ import torch.utils.data
7
+
8
+ import commons
9
+ from mel_processing import spectrogram_torch
10
+ from utils import load_wav_to_torch, load_filepaths_and_text
11
+ from text import text_to_sequence, cleaned_text_to_sequence
12
+
13
+
14
+ class TextAudioLoader(torch.utils.data.Dataset):
15
+ """
16
+ 1) loads audio, text pairs
17
+ 2) normalizes text and converts them to sequences of integers
18
+ 3) computes spectrograms from audio files.
19
+ """
20
+ def __init__(self, audiopaths_and_text, hparams):
21
+ self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
22
+ self.text_cleaners = hparams.text_cleaners
23
+ self.max_wav_value = hparams.max_wav_value
24
+ self.sampling_rate = hparams.sampling_rate
25
+ self.filter_length = hparams.filter_length
26
+ self.hop_length = hparams.hop_length
27
+ self.win_length = hparams.win_length
28
+ self.sampling_rate = hparams.sampling_rate
29
+
30
+ self.cleaned_text = getattr(hparams, "cleaned_text", False)
31
+
32
+ self.add_blank = hparams.add_blank
33
+ self.min_text_len = getattr(hparams, "min_text_len", 1)
34
+ self.max_text_len = getattr(hparams, "max_text_len", 190)
35
+
36
+ random.seed(1234)
37
+ random.shuffle(self.audiopaths_and_text)
38
+ self._filter()
39
+
40
+
41
+ def _filter(self):
42
+ """
43
+ Filter text & store spec lengths
44
+ """
45
+ # Store spectrogram lengths for Bucketing
46
+ # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
47
+ # spec_length = wav_length // hop_length
48
+
49
+ audiopaths_and_text_new = []
50
+ lengths = []
51
+ #self.audiopaths_and_text = self.audiopaths_and_text[1:]
52
+ #print(self.audiopaths_and_text)
53
+ for audiopath, text in self.audiopaths_and_text:
54
+ if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
55
+ audiopaths_and_text_new.append([audiopath, text])
56
+ lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
57
+ self.audiopaths_and_text = audiopaths_and_text_new
58
+ self.lengths = lengths
59
+
60
+ def get_audio_text_pair(self, audiopath_and_text):
61
+ # separate filename and text
62
+ audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
63
+ text = self.get_text(text)
64
+ spec, wav = self.get_audio(audiopath)
65
+ return (text, spec, wav)
66
+
67
+ def get_audio(self, filename):
68
+ audio, sampling_rate = load_wav_to_torch(filename)
69
+ if sampling_rate != self.sampling_rate:
70
+ raise ValueError("{} SR doesn't match target {} SR".format(
71
+ sampling_rate, self.sampling_rate))
72
+ audio_norm = audio / self.max_wav_value
73
+ audio_norm = audio_norm.unsqueeze(0)
74
+ spec_filename = filename.replace(".wav", ".spec.pt")
75
+ if os.path.exists(spec_filename):
76
+ spec = torch.load(spec_filename)
77
+ else:
78
+ spec = spectrogram_torch(audio_norm, self.filter_length,
79
+ self.sampling_rate, self.hop_length, self.win_length,
80
+ center=False)
81
+ spec = torch.squeeze(spec, 0)
82
+ # out = f"/content/drive/MyDrive/Aimesoft - Internship/Text To Speech/vits/jp_dataset/basic5000/spec/{spec_filename}"
83
+ torch.save(spec, spec_filename)
84
+ # torch.save(spec, out)
85
+ return spec, audio_norm
86
+
87
+ def get_text(self, text):
88
+ if self.cleaned_text:
89
+ text_norm = cleaned_text_to_sequence(text)
90
+ else:
91
+ text_norm = text_to_sequence(text, self.text_cleaners)
92
+ if self.add_blank:
93
+ text_norm = commons.intersperse(text_norm, 0)
94
+ text_norm = torch.LongTensor(text_norm)
95
+ return text_norm
96
+
97
+ def __getitem__(self, index):
98
+ return self.get_audio_text_pair(self.audiopaths_and_text[index])
99
+
100
+ def __len__(self):
101
+ return len(self.audiopaths_and_text)
102
+
103
+
104
+ class TextAudioCollate():
+     """ Zero-pads model inputs and targets
+     """
+     def __init__(self, return_ids=False):
+         self.return_ids = return_ids
+
+     def __call__(self, batch):
+         """Collates a training batch from normalized text and audio
+         PARAMS
+         ------
+         batch: [text_normalized, spec_normalized, wav_normalized]
+         """
+         # Right zero-pad all one-hot text sequences to max input length
+         _, ids_sorted_decreasing = torch.sort(
+             torch.LongTensor([x[1].size(1) for x in batch]),
+             dim=0, descending=True)
+
+         max_text_len = max([len(x[0]) for x in batch])
+         max_spec_len = max([x[1].size(1) for x in batch])
+         max_wav_len = max([x[2].size(1) for x in batch])
+
+         text_lengths = torch.LongTensor(len(batch))
+         spec_lengths = torch.LongTensor(len(batch))
+         wav_lengths = torch.LongTensor(len(batch))
+
+         text_padded = torch.LongTensor(len(batch), max_text_len)
+         spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
+         wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
+         text_padded.zero_()
+         spec_padded.zero_()
+         wav_padded.zero_()
+         for i in range(len(ids_sorted_decreasing)):
+             row = batch[ids_sorted_decreasing[i]]
+
+             text = row[0]
+             text_padded[i, :text.size(0)] = text
+             text_lengths[i] = text.size(0)
+
+             spec = row[1]
+             spec_padded[i, :, :spec.size(1)] = spec
+             spec_lengths[i] = spec.size(1)
+
+             wav = row[2]
+             wav_padded[i, :, :wav.size(1)] = wav
+             wav_lengths[i] = wav.size(1)
+
+         if self.return_ids:
+             return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing
+         return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths
+
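Because `__call__` sorts by spectrogram length before padding, the longest item lands in row 0 and sets the padded sizes. A shape-check sketch under assumed dimensions (513 frequency bins = 1024 // 2 + 1, hop 256); the tensors are random placeholders, not real dataset items:

import torch

collate = TextAudioCollate()
item_a = (torch.randint(1, 100, (12,)), torch.randn(513, 50), torch.randn(1, 50 * 256))
item_b = (torch.randint(1, 100, (7,)), torch.randn(513, 30), torch.randn(1, 30 * 256))
text, text_len, spec, spec_len, wav, wav_len = collate([item_a, item_b])
assert spec.shape == (2, 513, 50)            # padded to the longest spectrogram
assert spec_len.tolist() == [50, 30]         # rows sorted by decreasing length
assert text.shape == (2, 12) and text_len.tolist() == [12, 7]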
+ """Multi speaker version"""
158
+ class TextAudioSpeakerLoader(torch.utils.data.Dataset):
159
+ """
160
+ 1) loads audio, speaker_id, text pairs
161
+ 2) normalizes text and converts them to sequences of integers
162
+ 3) computes spectrograms from audio files.
163
+ """
164
+ def __init__(self, audiopaths_sid_text, hparams):
165
+ self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
166
+ self.text_cleaners = hparams.text_cleaners
167
+ self.max_wav_value = hparams.max_wav_value
168
+ self.sampling_rate = hparams.sampling_rate
169
+ self.filter_length = hparams.filter_length
170
+ self.hop_length = hparams.hop_length
171
+ self.win_length = hparams.win_length
172
+ self.sampling_rate = hparams.sampling_rate
173
+
174
+ self.cleaned_text = getattr(hparams, "cleaned_text", False)
175
+
176
+ self.add_blank = hparams.add_blank
177
+ self.min_text_len = getattr(hparams, "min_text_len", 1)
178
+ self.max_text_len = getattr(hparams, "max_text_len", 190)
179
+
180
+ random.seed(1234)
181
+ random.shuffle(self.audiopaths_sid_text)
182
+ self._filter()
183
+
184
+ def _filter(self):
185
+ """
186
+ Filter text & store spec lengths
187
+ """
188
+ # Store spectrogram lengths for Bucketing
189
+ # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
190
+ # spec_length = wav_length // hop_length
191
+
192
+ audiopaths_sid_text_new = []
193
+ lengths = []
194
+ for idx in self.audiopaths_sid_text:
195
+ if len(idx) != 3:
196
+ print(idx)
197
+ for audiopath, sid, text in self.audiopaths_sid_text:
198
+ if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
199
+ audiopaths_sid_text_new.append([audiopath, sid, text])
200
+ lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
201
+ self.audiopaths_sid_text = audiopaths_sid_text_new
202
+ self.lengths = lengths
203
+
204
+ def get_audio_text_speaker_pair(self, audiopath_sid_text):
205
+ # separate filename, speaker_id and text
206
+ audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2]
207
+ text = self.get_text(text)
208
+ spec, wav = self.get_audio(audiopath)
209
+ sid = self.get_sid(sid)
210
+ return (text, spec, wav, sid)
211
+
212
+ def get_audio(self, filename):
213
+ audio, sampling_rate = load_wav_to_torch(filename)
214
+ if sampling_rate != self.sampling_rate:
215
+ raise ValueError("{} SR doesn't match target {} SR".format(
216
+ sampling_rate, self.sampling_rate))
217
+ audio_norm = audio / self.max_wav_value
218
+ audio_norm = audio_norm.unsqueeze(0)
219
+ spec_filename = filename.replace(".wav", ".spec.pt")
220
+ if os.path.exists(spec_filename):
221
+ #print(spec_filename)
222
+ spec = torch.load(spec_filename)
223
+ else:
224
+ #print(audio_norm.shape,'*****************************')
225
+ spec = spectrogram_torch(audio_norm, self.filter_length,
226
+ self.sampling_rate, self.hop_length, self.win_length,
227
+ center=False)
228
+ spec = torch.squeeze(spec, 0)
229
+ torch.save(spec, spec_filename)
230
+ return spec, audio_norm
231
+
232
+ def get_text(self, text):
233
+ if self.cleaned_text:
234
+ text_norm = cleaned_text_to_sequence(text)
235
+ else:
236
+ text_norm = text_to_sequence(text, self.text_cleaners)
237
+ if self.add_blank:
238
+ text_norm = commons.intersperse(text_norm, 0)
239
+ text_norm = torch.LongTensor(text_norm)
240
+ return text_norm
241
+
242
+ def get_sid(self, sid):
243
+ sid = torch.LongTensor([int(sid)])
244
+ return sid
245
+
246
+ def __getitem__(self, index):
247
+ return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
248
+
249
+ def __len__(self):
250
+ return len(self.audiopaths_sid_text)
251
+
252
+
253
+ class TextAudioSpeakerCollate():
+     """ Zero-pads model inputs and targets
+     """
+     def __init__(self, return_ids=False):
+         self.return_ids = return_ids
+
+     def __call__(self, batch):
+         """Collates a training batch from normalized text, audio and speaker identities
+         PARAMS
+         ------
+         batch: [text_normalized, spec_normalized, wav_normalized, sid]
+         """
+         # Right zero-pad all one-hot text sequences to max input length
+         _, ids_sorted_decreasing = torch.sort(
+             torch.LongTensor([x[1].size(1) for x in batch]),
+             dim=0, descending=True)
+
+         max_text_len = max([len(x[0]) for x in batch])
+         max_spec_len = max([x[1].size(1) for x in batch])
+         max_wav_len = max([x[2].size(1) for x in batch])
+
+         text_lengths = torch.LongTensor(len(batch))
+         spec_lengths = torch.LongTensor(len(batch))
+         wav_lengths = torch.LongTensor(len(batch))
+         sid = torch.LongTensor(len(batch))
+
+         text_padded = torch.LongTensor(len(batch), max_text_len)
+         spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
+         wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
+         text_padded.zero_()
+         spec_padded.zero_()
+         wav_padded.zero_()
+         for i in range(len(ids_sorted_decreasing)):
+             row = batch[ids_sorted_decreasing[i]]
+
+             text = row[0]
+             text_padded[i, :text.size(0)] = text
+             text_lengths[i] = text.size(0)
+
+             spec = row[1]
+             spec_padded[i, :, :spec.size(1)] = spec
+             spec_lengths[i] = spec.size(1)
+
+             wav = row[2]
+             wav_padded[i, :, :wav.size(1)] = wav
+             wav_lengths[i] = wav.size(1)
+
+             sid[i] = row[3]
+
+         if self.return_ids:
+             return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing
+         return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid
+
+ class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
+     """
+     Maintain similar input lengths in a batch.
+     Length groups are specified by boundaries.
+     Ex) boundaries = [b1, b2, b3] -> every batch is drawn from a single group, either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
+
+     It removes samples which are not included in the boundaries.
+     Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 is discarded.
+     """
+     def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
+         super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
+         self.lengths = dataset.lengths
+         self.batch_size = batch_size
+         self.boundaries = boundaries
+
+         self.buckets, self.num_samples_per_bucket = self._create_buckets()
+         self.total_size = sum(self.num_samples_per_bucket)
+         self.num_samples = self.total_size // self.num_replicas
+
+     def _create_buckets(self):
+         buckets = [[] for _ in range(len(self.boundaries) - 1)]
+         for i in range(len(self.lengths)):
+             length = self.lengths[i]
+             idx_bucket = self._bisect(length)
+             if idx_bucket != -1:
+                 buckets[idx_bucket].append(i)
+
+         # Drop empty buckets and their upper boundaries
+         for i in range(len(buckets) - 2, -1, -1):
+             if len(buckets[i]) == 0:
+                 buckets.pop(i)
+                 self.boundaries.pop(i + 1)
+
+         # Pad each bucket so it divides evenly across replicas and batches
+         num_samples_per_bucket = []
+         for i in range(len(buckets)):
+             len_bucket = len(buckets[i])
+             total_batch_size = self.num_replicas * self.batch_size
+             rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
+             num_samples_per_bucket.append(len_bucket + rem)
+         return buckets, num_samples_per_bucket
+
+     def __iter__(self):
+         # deterministically shuffle based on epoch
+         g = torch.Generator()
+         g.manual_seed(self.epoch)
+
+         indices = []
+         if self.shuffle:
+             for bucket in self.buckets:
+                 indices.append(torch.randperm(len(bucket), generator=g).tolist())
+         else:
+             for bucket in self.buckets:
+                 indices.append(list(range(len(bucket))))
+
+         batches = []
+         for i in range(len(self.buckets)):
+             bucket = self.buckets[i]
+             len_bucket = len(bucket)
+             ids_bucket = indices[i]
+             num_samples_bucket = self.num_samples_per_bucket[i]
+
+             # add extra samples to make it evenly divisible
+             rem = num_samples_bucket - len_bucket
+             ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
+
+             # subsample for this replica
+             ids_bucket = ids_bucket[self.rank::self.num_replicas]
+
+             # batching
+             for j in range(len(ids_bucket) // self.batch_size):
+                 batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]]
+                 batches.append(batch)
+
+         if self.shuffle:
+             batch_ids = torch.randperm(len(batches), generator=g).tolist()
+             batches = [batches[i] for i in batch_ids]
+         self.batches = batches
+
+         assert len(self.batches) * self.batch_size == self.num_samples
+         return iter(self.batches)
+
+     def _bisect(self, x, lo=0, hi=None):
+         # Binary search: return i such that boundaries[i] < x <= boundaries[i+1], else -1
+         if hi is None:
+             hi = len(self.boundaries) - 1
+
+         if hi > lo:
+             mid = (hi + lo) // 2
+             if self.boundaries[mid] < x and x <= self.boundaries[mid+1]:
+                 return mid
+             elif x <= self.boundaries[mid]:
+                 return self._bisect(x, lo, mid)
+             else:
+                 return self._bisect(x, mid + 1, hi)
+         else:
+             return -1
+
+     def __len__(self):
+         return self.num_samples // self.batch_size
+
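Putting the pieces together: the sampler groups indices by the estimated spectrogram length, pads each bucket with repeated indices until it divides evenly by num_replicas * batch_size, and is handed to the DataLoader as a batch_sampler. A single-process wiring sketch; the boundary values and worker count are illustrative, not taken from this commit, and hparams is the hypothetical namespace from the earlier sketch:

from torch.utils.data import DataLoader

train_dataset = TextAudioSpeakerLoader("filelists/vn_vc_train.txt.cleaned", hparams)
train_sampler = DistributedBucketSampler(
    train_dataset, batch_size=5,
    boundaries=[32, 300, 400, 500, 600, 700, 800, 900, 1000],  # spec-frame buckets
    num_replicas=1, rank=0, shuffle=True)
train_loader = DataLoader(train_dataset, batch_sampler=train_sampler,
                          num_workers=2, pin_memory=True,
                          collate_fn=TextAudioSpeakerCollate())
train_sampler.set_epoch(0)  # reseeds the deterministic per-epoch shuffle
text, text_len, spec, spec_len, wav, wav_len, sid = next(iter(train_loader))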
+ '''Voice-conversion problem'''
+ class TextAudioVCLoader(torch.utils.data.Dataset):
+     """
+     1) loads audio, speaker_id, text pairs
+     2) normalizes text and converts it to sequences of integers
+     3) computes spectrograms from audio files.
+     """
+     def __init__(self, audiopaths_sid_text, hparams):
+         self.max_mel_length = 192
+         self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
+         self.text_cleaners = hparams.text_cleaners
+         self.max_wav_value = hparams.max_wav_value
+         self.sampling_rate = hparams.sampling_rate
+         self.filter_length = hparams.filter_length
+         self.hop_length = hparams.hop_length
+         self.win_length = hparams.win_length
+
+         self.cleaned_text = getattr(hparams, "cleaned_text", False)
+
+         self.add_blank = hparams.add_blank
+         self.min_text_len = getattr(hparams, "min_text_len", 1)
+         self.max_text_len = getattr(hparams, "max_text_len", 190)
+
+         random.seed(1234)
+         random.shuffle(self.audiopaths_sid_text)
+         self._filter()
+         # For each speaker id, keep the utterances of every *other* speaker,
+         # so a conversion reference can always be drawn from a different speaker.
+         self.data_list_per_class = {
+             str(target): [[path, label, text] for path, label, text in self.audiopaths_sid_text if label != target]
+             for target in set(label for _, label, _ in self.audiopaths_sid_text)}
+
+     def _filter(self):
+         """
+         Filter text & store spec lengths
+         """
+         # Store spectrogram lengths for bucketing (see TextAudioLoader._filter)
+         audiopaths_sid_text_new = []
+         lengths = []
+         # Report filelist entries that do not split into (path, sid, text)
+         for idx in self.audiopaths_sid_text:
+             if len(idx) != 3:
+                 print(idx)
+         for audiopath, sid, text in self.audiopaths_sid_text:
+             if self.min_text_len <= len(text) <= self.max_text_len:
+                 audiopaths_sid_text_new.append([audiopath, sid, text])
+                 lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
+         self.audiopaths_sid_text = audiopaths_sid_text_new
+         self.lengths = lengths
+
+     def get_audio_text_speaker_pair(self, audiopath_sid_text):
+         # separate filename, speaker_id and text
+         audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2]
+         text = self.get_text(text)
+         spec, wav = self.get_audio(audiopath)
+
+         # Crop long spectrograms to a random max_mel_length window
+         mel_length = spec.size(1)
+         if mel_length > self.max_mel_length:
+             random_start = np.random.randint(0, mel_length - self.max_mel_length)
+             spec = spec[:, random_start:random_start + self.max_mel_length]
+
+         sid = self.get_sid(sid)
+         return (text, spec, wav, sid)
+
+     def get_audio(self, filename):
+         audio, sampling_rate = load_wav_to_torch(filename)
+         if sampling_rate != self.sampling_rate:
+             raise ValueError("{} SR doesn't match target {} SR".format(
+                 sampling_rate, self.sampling_rate))
+         audio_norm = audio / self.max_wav_value
+         audio_norm = audio_norm.unsqueeze(0)
+         spec_filename = filename.replace(".wav", ".spec.pt")
+         if os.path.exists(spec_filename):
+             spec = torch.load(spec_filename)
+         else:
+             spec = spectrogram_torch(audio_norm, self.filter_length,
+                 self.sampling_rate, self.hop_length, self.win_length,
+                 center=False)
+             spec = torch.squeeze(spec, 0)
+             torch.save(spec, spec_filename)
+         return spec, audio_norm
+
+     def get_text(self, text):
+         if self.cleaned_text:
+             text_norm = cleaned_text_to_sequence(text)
+         else:
+             text_norm = text_to_sequence(text, self.text_cleaners)
+         if self.add_blank:
+             text_norm = commons.intersperse(text_norm, 0)
+         text_norm = torch.LongTensor(text_norm)
+         return text_norm
+
+     def get_sid(self, sid):
+         sid = torch.LongTensor([int(sid)])
+         return sid
+
+     def __getitem__(self, index):
+         (text, spec, wav, sid) = self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
+         # Draw a reference utterance from a different speaker as the conversion target
+         ref2_data = random.choice(self.data_list_per_class[str(sid.item())])
+         (text_tgt, spec_tgt, wav_tgt, sid_tgt) = self.get_audio_text_speaker_pair(ref2_data)
+         return (text, spec, wav, sid, text_tgt, spec_tgt, wav_tgt, sid_tgt)
+
+     def __len__(self):
+         return len(self.audiopaths_sid_text)
+
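The data_list_per_class mapping is the heart of the pairing: for each speaker id it stores every utterance whose label differs, so __getitem__ can always draw a reference from another speaker. A sanity-check sketch, assuming the filelist contains at least two speakers and reusing the hypothetical hparams from above:

vc_dataset = TextAudioVCLoader("filelists/vn_vc_train.txt.cleaned", hparams)
text, spec, wav, sid, text_t, spec_t, wav_t, sid_t = vc_dataset[0]
assert sid.item() != sid_t.item()  # the target reference is always another speaker
assert spec.size(1) <= 192 and spec_t.size(1) <= 192  # cropped to max_mel_length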
+ class TextAudioVCCollate():
+     """ Zero-pads model inputs and targets
+     """
+     def __init__(self, return_ids=False):
+         self.return_ids = return_ids
+         self.max_mel_length = 192
+
+     def __call__(self, batch):
+         """Collates a training batch from normalized text, audio and speaker identities
+         PARAMS
+         ------
+         batch: [text_normalized, spec_normalized, wav_normalized, sid,
+                 text_tgt, spec_tgt, wav_tgt, sid_tgt]
+         """
+         # Sort by decreasing source spectrogram length
+         _, ids_sorted_decreasing = torch.sort(
+             torch.LongTensor([x[1].size(1) for x in batch]),
+             dim=0, descending=True)
+
+         ### SRC
+         max_text_len = max([len(x[0]) for x in batch])
+         max_wav_len = max([x[2].size(1) for x in batch])
+
+         text_lengths = torch.LongTensor(len(batch))
+         spec_lengths = torch.LongTensor(len(batch))
+         wav_lengths = torch.LongTensor(len(batch))
+         sid = torch.LongTensor(len(batch))
+
+         text_padded = torch.LongTensor(len(batch), max_text_len)
+         # Spectrograms are padded to the fixed max_mel_length crop size
+         spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), self.max_mel_length)
+         wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
+         text_padded.zero_()
+         spec_padded.zero_()
+         wav_padded.zero_()
+
+         ### TGT
+         max_text_len_tgt = max([len(x[4]) for x in batch])
+         max_wav_len_tgt = max([x[6].size(1) for x in batch])
+
+         text_lengths_tgt = torch.LongTensor(len(batch))
+         spec_lengths_tgt = torch.LongTensor(len(batch))
+         wav_lengths_tgt = torch.LongTensor(len(batch))
+         sid_tgt = torch.LongTensor(len(batch))
+
+         text_padded_tgt = torch.LongTensor(len(batch), max_text_len_tgt)
+         spec_padded_tgt = torch.FloatTensor(len(batch), batch[0][1].size(0), self.max_mel_length)
+         wav_padded_tgt = torch.FloatTensor(len(batch), 1, max_wav_len_tgt)
+         text_padded_tgt.zero_()
+         spec_padded_tgt.zero_()
+         wav_padded_tgt.zero_()
+
+         for i in range(len(ids_sorted_decreasing)):
+             row = batch[ids_sorted_decreasing[i]]
+
+             text = row[0]
+             text_padded[i, :text.size(0)] = text
+             text_lengths[i] = text.size(0)
+
+             spec = row[1]
+             spec_padded[i, :, :spec.size(1)] = spec
+             spec_lengths[i] = spec.size(1)
+
+             wav = row[2]
+             wav_padded[i, :, :wav.size(1)] = wav
+             wav_lengths[i] = wav.size(1)
+
+             sid[i] = row[3]
+
+             text = row[4]
+             text_padded_tgt[i, :text.size(0)] = text
+             text_lengths_tgt[i] = text.size(0)
+
+             spec = row[5]
+             spec_padded_tgt[i, :, :spec.size(1)] = spec
+             spec_lengths_tgt[i] = spec.size(1)
+
+             wav = row[6]
+             wav_padded_tgt[i, :, :wav.size(1)] = wav
+             wav_lengths_tgt[i] = wav.size(1)
+
+             sid_tgt[i] = row[7]
+
+         # Note: the target tensors are returned in both branches so no data is dropped
+         if self.return_ids:
+             return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, text_padded_tgt, text_lengths_tgt, spec_padded_tgt, spec_lengths_tgt, wav_padded_tgt, wav_lengths_tgt, sid_tgt, ids_sorted_decreasing
+         return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, text_padded_tgt, text_lengths_tgt, spec_padded_tgt, spec_lengths_tgt, wav_padded_tgt, wav_lengths_tgt, sid_tgt
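Unlike the fixed-crop spectrograms, text and waveform are still padded to the per-batch maximum, so the VC collate returns fourteen tensors (source fields first, then target). A sketch reusing the vc_dataset from the previous snippet:

from torch.utils.data import DataLoader

vc_loader = DataLoader(vc_dataset, batch_size=5, shuffle=True,
                       collate_fn=TextAudioVCCollate())
(text, text_len, spec, spec_len, wav, wav_len, sid,
 text_t, text_len_t, spec_t, spec_len_t, wav_t, wav_len_t, sid_t) = next(iter(vc_loader))
assert spec.shape[2] == 192 and spec_t.shape[2] == 192  # fixed max_mel_length padding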
infer_result/test_0.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11480a7ad0896954343fcaa7df1b5e8d8f159ad407438f8d075d98ef2bf572d2
+ size 137274
infer_result/test_1.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3abab7e83bbb1a288710230eca55e818af2b15fe972c82af69ad1f8a24d69dc0
+ size 144442
infer_result/test_2.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ca0f1170f8bcfdb4fa21a7262a6e5cab2fd90d61626184d1f9ca5937dd544f6
+ size 138298
infer_result/test_3.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f813715163e99a2527b12bb3357e07642a58c17850fc60f837579da30b93c428
+ size 135226
logs/large_audio/D_504000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99eb3b9d422a0f699084c3a5d9368db0baeca54a3679e645c8d4f30a8eed745d
+ size 561099143
logs/large_audio/G_504000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7fcd58e2482132df1be614c984810bd94f9d588b25e522cfba26c7330dbc4f56
+ size 566715675
logs/male_vie/D_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9fe4a24e1c3c457c5d1d1d719d316a9fe97c7c7c962a7cfe7a1deac972015a4
+ size 561077841
logs/male_vie/D_1500.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad3015132b7ff1f8a04614deddad844f1b128e088af763ec4726444304dfd3ed
+ size 561093195
logs/male_vie/G_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c71cd0fe89131532c48090174b197728afa9edf113c55b63b582dad5bb7aae5
+ size 566523302
logs/male_vie/G_1500.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:831212a60e5cb63ecf6daa8cb126606c576f00810f73e6460c6ebcf9924c7ff9
+ size 566707193
logs/male_vie/G_20000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93d31b5cf4dbd84f0ffdf0b96266a971db7e8a567e77d1beb44dbf9e54b0c2fb
+ size 566711403
logs/male_vie/config.json ADDED
@@ -0,0 +1,52 @@
+ {
+   "train": {
+     "log_interval": 100,
+     "eval_interval": 1500,
+     "seed": 1234,
+     "epochs": 30000,
+     "learning_rate": 2e-5,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 5,
+     "fp16_run": false,
+     "lr_decay": 0.999875,
+     "segment_size": 8192,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "training_files": "filelists/vn_vc_train.txt.cleaned",
+     "validation_files": "filelists/vn_vc_val.txt.cleaned",
+     "text_cleaners": ["vietnamese_cleaner"],
+     "max_wav_value": 32768.0,
+     "sampling_rate": 16000,
+     "filter_length": 1024,
+     "hop_length": 256,
+     "win_length": 1024,
+     "n_mel_channels": 80,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": false,
+     "n_speakers": 0,
+     "cleaned_text": true
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+     "upsample_rates": [8, 8, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 4, 4],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 256
+   }
+ }
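This is the same configuration that train.log echoes below. A sketch for reading it into attribute-style hyperparameters with only the standard library (the repository's own utils helper may differ):

import json
from types import SimpleNamespace

with open("logs/male_vie/config.json") as f:
    cfg = json.load(f, object_hook=lambda d: SimpleNamespace(**d))
assert cfg.data.sampling_rate == 16000 and cfg.train.batch_size == 5
print(cfg.model.gin_channels)  # 256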
logs/male_vie/eval/events.out.tfevents.1710755437.HungVo.15112.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:244ad97bf168dcfcb0574f3773258b634951494a6b86332c28a143db0f28ce84
+ size 40
logs/male_vie/eval/events.out.tfevents.1710755461.HungVo.19504.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7867aa7613ac2bac3b2b3e73eaefddc26631bfdbc6734b88d1507ca639e175de
+ size 40
logs/male_vie/eval/events.out.tfevents.1710755705.HungVo.1052.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da810db8be159bbedc788f781fd841431502b3ba94be813a992d8e3fc70b950e
+ size 40
logs/male_vie/eval/events.out.tfevents.1710756795.HungVo.1832.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4820054fe8d7dd2d5f283a1fa817a5df7a13b5052c54b2e8386ca29486ce5ba5
+ size 40
logs/male_vie/eval/events.out.tfevents.1710756989.HungVo.1676.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0dfb1d95e19e48b4f04047e4ded25c1b09874414bdf91c573c1be858b60132a0
+ size 40
logs/male_vie/eval/events.out.tfevents.1710764452.HungVo.23912.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9937f3e9c39a8c176ec3d204c4698f8abc64ae6461bbf7ddc4ae27d5e97e1526
+ size 371740
logs/male_vie/events.out.tfevents.1710669409.HungVo.3648.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9de4d302e8d4dd20fff4356d651430aa300f1e7d2a4d77fa7bb6765d0cf1a807
+ size 1038923
logs/male_vie/events.out.tfevents.1710755437.HungVo.15112.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de5309abf145937f48751974100b124258daa62b66e28329b9daaed25c9447cf
+ size 40
logs/male_vie/events.out.tfevents.1710755461.HungVo.19504.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fb7aa30d494fef5e6febc4ee468c4698a2e73d7a563f3cd24f4162632bd9383
+ size 40
logs/male_vie/events.out.tfevents.1710755705.HungVo.1052.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d40dd71f1760a64c5ee9e5c4f34d481d332680db9f719928ea1fb569001218d
+ size 40
logs/male_vie/events.out.tfevents.1710756795.HungVo.1832.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0604ad45ac42b3401baeaec49159c6b0efd56b7527e8dd4da9ba8884d0cf560b
+ size 40
logs/male_vie/events.out.tfevents.1710756989.HungVo.1676.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:587166d5ac3f02dbf43567acabf6cf42d5739c53401aa25dbca40caa802c8c3b
+ size 54276
logs/male_vie/events.out.tfevents.1710764452.HungVo.23912.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:421a00963101947143a67f30346281de0f63e65d09e1707da8ddaf65765b6b3c
+ size 55766
logs/male_vie/train.log ADDED
@@ -0,0 +1,167 @@
+ 2024-03-16 22:39:05,289 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-16 22:39:05,291 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-16 22:55:59,156 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-16 22:55:59,158 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-16 22:56:07,480 male_vie INFO enc_p.bert.embeddings.position_ids is not in the checkpoint
+ 2024-03-16 22:56:07,577 male_vie INFO Loaded checkpoint 'logs/large_audio\G_504000.pth' (iteration 515)
+ 2024-03-16 22:56:08,853 male_vie INFO Loaded checkpoint 'logs/large_audio\D_504000.pth' (iteration 515)
+ 2024-03-16 22:57:06,505 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-16 22:57:06,516 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-16 22:57:10,537 male_vie INFO enc_p.bert.embeddings.position_ids is not in the checkpoint
+ 2024-03-16 22:57:10,656 male_vie INFO Loaded checkpoint 'logs/large_audio\G_504000.pth' (iteration 515)
+ 2024-03-16 22:57:11,374 male_vie INFO Loaded checkpoint 'logs/large_audio\D_504000.pth' (iteration 515)
+ 2024-03-16 23:38:44,505 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-16 23:38:44,506 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-16 23:38:55,801 male_vie INFO enc_p.bert.embeddings.position_ids is not in the checkpoint
+ 2024-03-16 23:38:55,967 male_vie INFO Loaded checkpoint 'logs/large_audio\G_504000.pth' (iteration 515)
+ 2024-03-16 23:38:57,676 male_vie INFO Loaded checkpoint 'logs/large_audio\D_504000.pth' (iteration 515)
+ 2024-03-16 23:41:33,526 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 22050, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-16 23:41:33,526 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-16 23:41:42,239 male_vie INFO enc_p.bert.embeddings.position_ids is not in the checkpoint
+ 2024-03-16 23:41:42,376 male_vie INFO Loaded checkpoint 'logs/large_audio\G_504000.pth' (iteration 515)
+ 2024-03-16 23:41:44,029 male_vie INFO Loaded checkpoint 'logs/large_audio\D_504000.pth' (iteration 515)
+ 2024-03-16 23:43:36,013 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-16 23:43:36,014 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-16 23:43:41,121 male_vie INFO enc_p.bert.embeddings.position_ids is not in the checkpoint
+ 2024-03-16 23:43:41,224 male_vie INFO Loaded checkpoint 'logs/large_audio\G_504000.pth' (iteration 515)
+ 2024-03-16 23:43:42,492 male_vie INFO Loaded checkpoint 'logs/large_audio\D_504000.pth' (iteration 515)
+ 2024-03-16 23:45:38,654 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-16 23:45:38,656 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-16 23:45:42,883 male_vie INFO enc_p.bert.embeddings.position_ids is not in the checkpoint
+ 2024-03-16 23:45:42,991 male_vie INFO Loaded checkpoint 'logs/large_audio\G_504000.pth' (iteration 515)
+ 2024-03-16 23:45:43,615 male_vie INFO Loaded checkpoint 'logs/large_audio\D_504000.pth' (iteration 515)
+ 2024-03-16 23:47:12,589 male_vie INFO Train Epoch: 1 [0%]
+ 2024-03-16 23:47:12,590 male_vie INFO [6.651930809020996, 2.012359142303467, 19.67410659790039, 155.5628204345703, 2.70119047164917, 0, 1.8694475538262127e-05]
+ 2024-03-16 23:48:07,021 male_vie INFO Saving model and optimizer state at iteration 1 to ./logs\male_vie\G_0.pth
+ 2024-03-16 23:48:09,245 male_vie INFO Saving model and optimizer state at iteration 1 to ./logs\male_vie\D_0.pth
+ 2024-03-17 00:47:03,381 male_vie INFO Train Epoch: 1 [59%]
+ 2024-03-17 00:47:03,382 male_vie INFO [7.044645309448242, 2.1415274143218994, 20.651132583618164, 129.09628295898438, 2.7801339626312256, 100, 1.8694475538262127e-05]
+ 2024-03-17 01:27:12,382 male_vie INFO ====> Epoch: 1
+ 2024-03-17 01:46:40,904 male_vie INFO Train Epoch: 2 [18%]
+ 2024-03-17 01:46:40,905 male_vie INFO [7.893451690673828, 2.2248077392578125, 21.894351959228516, 127.06752014160156, 2.4763646125793457, 200, 1.8692138728819844e-05]
+ 2024-03-17 02:48:38,380 male_vie INFO Train Epoch: 2 [76%]
+ 2024-03-17 02:48:38,381 male_vie INFO [6.966772079467773, 2.119523763656616, 19.627840042114258, 129.53414916992188, 2.744324207305908, 300, 1.8692138728819844e-05]
+ 2024-03-17 03:13:15,504 male_vie INFO ====> Epoch: 2
+ 2024-03-17 08:10:19,082 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-17 08:10:19,082 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-17 08:10:27,436 male_vie INFO enc_p.bert.embeddings.position_ids is not in the checkpoint
+ 2024-03-17 08:10:27,547 male_vie INFO Loaded checkpoint 'logs/large_audio\G_504000.pth' (iteration 515)
+ 2024-03-17 08:10:28,772 male_vie INFO Loaded checkpoint 'logs/large_audio\D_504000.pth' (iteration 515)
+ 2024-03-17 08:11:42,723 male_vie INFO Train Epoch: 1 [0%]
+ 2024-03-17 08:11:42,733 male_vie INFO [6.651930809020996, 2.012359142303467, 19.67410659790039, 155.5628204345703, 2.70119047164917, 0, 1.8694475538262127e-05]
+ 2024-03-17 08:12:22,203 male_vie INFO Saving model and optimizer state at iteration 1 to ./logs\male_vie\G_0.pth
+ 2024-03-17 08:12:24,579 male_vie INFO Saving model and optimizer state at iteration 1 to ./logs\male_vie\D_0.pth
+ 2024-03-17 09:12:30,982 male_vie INFO Train Epoch: 1 [59%]
+ 2024-03-17 09:12:30,987 male_vie INFO [6.947372913360596, 2.238553047180176, 20.739242553710938, 129.25796508789062, 2.728516101837158, 100, 1.8694475538262127e-05]
+ 2024-03-17 09:55:54,573 male_vie INFO ====> Epoch: 1
+ 2024-03-17 10:18:16,984 male_vie INFO Train Epoch: 2 [18%]
+ 2024-03-17 10:18:16,993 male_vie INFO [7.852853775024414, 2.2317934036254883, 21.815223693847656, 127.02307891845703, 2.4501757621765137, 200, 1.8692138728819844e-05]
+ 2024-03-17 10:43:28,093 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-17 10:43:28,095 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-17 10:43:41,427 male_vie INFO enc_p.bert.embeddings.position_ids is not in the checkpoint
+ 2024-03-17 10:43:41,589 male_vie INFO Loaded checkpoint 'logs/large_audio\G_504000.pth' (iteration 515)
+ 2024-03-17 10:43:43,210 male_vie INFO Loaded checkpoint 'logs/large_audio\D_504000.pth' (iteration 515)
+ 2024-03-17 10:45:48,629 male_vie INFO Train Epoch: 1 [0%]
+ 2024-03-17 10:45:48,633 male_vie INFO [6.651930332183838, 2.012359142303467, 19.67410659790039, 155.5628204345703, 2.70119047164917, 0, 1.8694475538262127e-05]
+ 2024-03-17 10:46:52,354 male_vie INFO Saving model and optimizer state at iteration 1 to ./logs\male_vie\G_0.pth
+ 2024-03-17 10:46:55,507 male_vie INFO Saving model and optimizer state at iteration 1 to ./logs\male_vie\D_0.pth
+ 2024-03-17 11:55:59,065 male_vie INFO Train Epoch: 1 [59%]
+ 2024-03-17 11:55:59,086 male_vie INFO [6.929162502288818, 2.0876364707946777, 20.86014747619629, 129.14369201660156, 2.7928645610809326, 100, 1.8694475538262127e-05]
+ 2024-03-17 12:38:40,018 male_vie INFO ====> Epoch: 1
+ 2024-03-17 13:01:52,264 male_vie INFO Train Epoch: 2 [18%]
+ 2024-03-17 13:01:52,266 male_vie INFO [7.841618537902832, 2.1505513191223145, 21.62759780883789, 126.8673324584961, 2.4580678939819336, 200, 1.8692138728819844e-05]
+ 2024-03-17 14:01:36,851 male_vie INFO Train Epoch: 2 [76%]
+ 2024-03-17 14:01:36,862 male_vie INFO [6.845136642456055, 2.050686836242676, 19.50233268737793, 129.25625610351562, 2.833055019378662, 300, 1.8692138728819844e-05]
+ 2024-03-17 14:25:08,056 male_vie INFO ====> Epoch: 2
+ 2024-03-17 15:06:43,517 male_vie INFO Train Epoch: 3 [35%]
+ 2024-03-17 15:06:43,527 male_vie INFO [7.235151290893555, 2.246917247772217, 22.124237060546875, 120.701904296875, 2.603179454803467, 400, 1.868980221147874e-05]
+ 2024-03-17 16:56:49,197 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-17 16:56:49,198 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-17 16:56:57,975 male_vie INFO enc_p.bert.embeddings.position_ids is not in the checkpoint
+ 2024-03-17 16:56:58,241 male_vie INFO Loaded checkpoint 'logs/large_audio\G_504000.pth' (iteration 515)
+ 2024-03-17 16:56:59,727 male_vie INFO Loaded checkpoint 'logs/large_audio\D_504000.pth' (iteration 515)
+ 2024-03-17 16:58:31,794 male_vie INFO Train Epoch: 1 [0%]
+ 2024-03-17 16:58:31,795 male_vie INFO [6.651930332183838, 2.012359142303467, 19.67410659790039, 155.5628204345703, 2.70119047164917, 0, 1.8694475538262127e-05]
+ 2024-03-17 16:59:31,592 male_vie INFO Saving model and optimizer state at iteration 1 to ./logs\male_vie\G_0.pth
+ 2024-03-17 16:59:33,562 male_vie INFO Saving model and optimizer state at iteration 1 to ./logs\male_vie\D_0.pth
+ 2024-03-17 18:14:15,828 male_vie INFO Train Epoch: 1 [59%]
+ 2024-03-17 18:14:15,849 male_vie INFO [6.776693344116211, 2.0164573192596436, 20.75982666015625, 128.8275146484375, 2.8285598754882812, 100, 1.8694475538262127e-05]
+ 2024-03-17 19:03:34,032 male_vie INFO ====> Epoch: 1
+ 2024-03-17 19:26:39,055 male_vie INFO Train Epoch: 2 [18%]
+ 2024-03-17 19:26:39,056 male_vie INFO [7.91411018371582, 2.1998839378356934, 21.66338539123535, 126.89119720458984, 2.448057174682617, 200, 1.8692138728819844e-05]
+ 2024-03-17 20:39:25,488 male_vie INFO Train Epoch: 2 [76%]
+ 2024-03-17 20:39:25,668 male_vie INFO [6.839813709259033, 2.1088690757751465, 19.949024200439453, 129.727294921875, 2.8894267082214355, 300, 1.8692138728819844e-05]
+ 2024-03-17 21:12:10,478 male_vie INFO ====> Epoch: 2
+ 2024-03-17 22:01:41,446 male_vie INFO Train Epoch: 3 [35%]
+ 2024-03-17 22:01:41,466 male_vie INFO [7.033435821533203, 2.165224552154541, 22.49204444885254, 120.73246002197266, 2.6006181240081787, 400, 1.868980221147874e-05]
+ 2024-03-17 23:20:55,878 male_vie INFO Train Epoch: 3 [94%]
+ 2024-03-17 23:20:55,902 male_vie INFO [8.983758926391602, 2.2133359909057617, 23.290252685546875, 137.94297790527344, 2.397552490234375, 500, 1.868980221147874e-05]
+ 2024-03-17 23:26:43,293 male_vie INFO ====> Epoch: 3
+ 2024-03-18 00:26:12,148 male_vie INFO Train Epoch: 4 [53%]
+ 2024-03-18 00:26:12,149 male_vie INFO [6.280219078063965, 1.861822247505188, 17.809497833251953, 126.71278381347656, 2.603851318359375, 600, 1.8687465986202305e-05]
+ 2024-03-18 01:18:44,234 male_vie INFO ====> Epoch: 4
+ 2024-03-18 01:33:50,180 male_vie INFO Train Epoch: 5 [12%]
+ 2024-03-18 01:33:50,182 male_vie INFO [6.127190589904785, 2.126840114593506, 18.870405197143555, 128.9093475341797, 2.7763383388519287, 700, 1.868513005295403e-05]
+ 2024-03-18 02:47:57,915 male_vie INFO Train Epoch: 5 [71%]
+ 2024-03-18 02:47:57,916 male_vie INFO [6.46183967590332, 2.200648069381714, 20.458229064941406, 116.70836639404297, 2.575395345687866, 800, 1.868513005295403e-05]
+ 2024-03-18 03:23:38,545 male_vie INFO ====> Epoch: 5
+ 2024-03-18 04:00:33,927 male_vie INFO Train Epoch: 6 [29%]
+ 2024-03-18 04:00:33,928 male_vie INFO [4.161818027496338, 1.7568303346633911, 13.708724975585938, 115.80963897705078, 2.7804453372955322, 900, 1.868279441169741e-05]
+ 2024-03-18 05:16:40,479 male_vie INFO Train Epoch: 6 [88%]
+ 2024-03-18 05:16:40,481 male_vie INFO [6.6832356452941895, 2.0352120399475098, 18.298847198486328, 121.4527359008789, 2.6142640113830566, 1000, 1.868279441169741e-05]
+ 2024-03-18 05:31:20,308 male_vie INFO ====> Epoch: 6
+ 2024-03-18 06:35:05,853 male_vie INFO Train Epoch: 7 [47%]
+ 2024-03-18 06:35:05,860 male_vie INFO [5.207909107208252, 2.0261073112487793, 13.530330657958984, 107.13916778564453, 2.787060022354126, 1100, 1.8680459062395946e-05]
+ 2024-03-18 07:44:53,450 male_vie INFO ====> Epoch: 7
+ 2024-03-18 07:54:47,657 male_vie INFO Train Epoch: 8 [6%]
+ 2024-03-18 07:54:47,679 male_vie INFO [3.39747953414917, 1.691089153289795, 11.782424926757812, 116.05399322509766, 2.661785125732422, 1200, 1.8678124005013146e-05]
+ 2024-03-18 09:04:28,376 male_vie INFO Train Epoch: 8 [65%]
+ 2024-03-18 09:04:28,387 male_vie INFO [9.60261344909668, 2.518230438232422, 24.22492790222168, 134.94119262695312, 2.110210418701172, 1300, 1.8678124005013146e-05]
+ 2024-03-18 09:44:20,449 male_vie INFO ====> Epoch: 8
+ 2024-03-18 10:14:43,772 male_vie INFO Train Epoch: 9 [24%]
+ 2024-03-18 10:14:43,773 male_vie INFO [6.8275322914123535, 1.9893134832382202, 19.021554946899414, 129.34959411621094, 2.4710865020751953, 1400, 1.867578923951252e-05]
+ 2024-03-18 11:28:26,410 male_vie INFO Train Epoch: 9 [82%]
+ 2024-03-18 11:28:26,411 male_vie INFO [6.7782182693481445, 1.8750288486480713, 19.887435913085938, 125.86314392089844, 2.7619659900665283, 1500, 1.867578923951252e-05]
+ 2024-03-18 11:29:22,585 male_vie INFO Saving model and optimizer state at iteration 9 to ./logs\male_vie\G_1500.pth
+ 2024-03-18 11:29:24,584 male_vie INFO Saving model and optimizer state at iteration 9 to ./logs\male_vie\D_1500.pth
+ 2024-03-18 11:49:32,988 male_vie INFO ====> Epoch: 9
+ 2024-03-18 12:36:12,872 male_vie INFO Train Epoch: 10 [41%]
+ 2024-03-18 12:36:12,873 male_vie INFO [3.6889524459838867, 1.8166289329528809, 13.637166976928711, 118.66926574707031, 2.769404888153076, 1600, 1.867345476585758e-05]
+ 2024-03-18 13:41:15,406 male_vie INFO ====> Epoch: 10
+ 2024-03-18 13:42:38,258 male_vie INFO Train Epoch: 11 [0%]
+ 2024-03-18 13:42:38,259 male_vie INFO [8.472406387329102, 2.2948174476623535, 22.830230712890625, 129.91049194335938, 2.4730052947998047, 1700, 1.8671120584011846e-05]
+ 2024-03-18 14:52:01,014 male_vie INFO Train Epoch: 11 [59%]
+ 2024-03-18 14:52:01,015 male_vie INFO [6.468190670013428, 2.120546340942383, 19.966110229492188, 119.94347381591797, 2.5281734466552734, 1800, 1.8671120584011846e-05]
+ 2024-03-18 15:43:44,910 male_vie INFO ====> Epoch: 11
+ 2024-03-18 16:08:47,971 male_vie INFO Train Epoch: 12 [18%]
+ 2024-03-18 16:08:48,003 male_vie INFO [7.995875358581543, 2.2428460121154785, 22.295595169067383, 133.1580352783203, 2.4665212631225586, 1900, 1.8668786693938842e-05]
+ 2024-03-18 16:50:37,732 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-18 16:50:37,742 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-18 16:51:01,486 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-18 16:51:01,488 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-18 16:55:05,754 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-18 16:55:05,756 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-18 16:55:20,231 male_vie INFO enc_p.bert.embeddings.position_ids is not in the checkpoint
+ 2024-03-18 16:55:20,523 male_vie INFO Loaded checkpoint 'logs/large_audio\G_504000.pth' (iteration 515)
+ 2024-03-18 16:55:22,712 male_vie INFO Loaded checkpoint 'logs/large_audio\D_504000.pth' (iteration 515)
+ 2024-03-18 17:13:15,590 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-18 17:13:15,593 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-18 17:13:28,279 male_vie INFO enc_p.bert.embeddings.position_ids is not in the checkpoint
+ 2024-03-18 17:13:28,608 male_vie INFO Loaded checkpoint 'logs/large_audio\G_504000.pth' (iteration 515)
+ 2024-03-18 17:13:31,142 male_vie INFO Loaded checkpoint 'logs/large_audio\D_504000.pth' (iteration 515)
+ 2024-03-18 17:16:29,136 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-18 17:16:29,137 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-18 17:16:39,076 male_vie INFO enc_p.bert.embeddings.position_ids is not in the checkpoint
+ 2024-03-18 17:16:39,287 male_vie INFO Loaded checkpoint 'logs/large_audio\G_504000.pth' (iteration 515)
+ 2024-03-18 17:16:40,344 male_vie INFO Loaded checkpoint 'logs/large_audio\D_504000.pth' (iteration 515)
+ 2024-03-18 17:18:41,470 male_vie INFO Train Epoch: 1 [0%]
+ 2024-03-18 17:18:41,541 male_vie INFO [7.801321506500244, 2.2275946140289307, 19.761905670166016, 147.26947021484375, 2.7517318725585938, 0, 1.8694475538262127e-05]
+ 2024-03-18 19:20:52,862 male_vie INFO {'train': {'log_interval': 100, 'eval_interval': 1500, 'seed': 1234, 'epochs': 30000, 'learning_rate': 2e-05, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 5, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/vn_vc_train.txt.cleaned', 'validation_files': 'filelists/vn_vc_val.txt.cleaned', 'text_cleaners': ['vietnamese_cleaner'], 'max_wav_value': 32768.0, 'sampling_rate': 16000, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': False, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'model_dir': './logs\\male_vie'}
+ 2024-03-18 19:20:52,873 male_vie WARNING C:\Users\HungVo\Downloads\vits-japanese-main-male is not a git repository, therefore hash value comparison will be ignored.
+ 2024-03-18 19:21:04,602 male_vie INFO enc_p.bert.embeddings.position_ids is not in the checkpoint
+ 2024-03-18 19:21:04,920 male_vie INFO Loaded checkpoint 'logs/large_audio\G_504000.pth' (iteration 515)
+ 2024-03-18 19:21:06,992 male_vie INFO Loaded checkpoint 'logs/large_audio\D_504000.pth' (iteration 515)
+ 2024-03-18 19:22:46,553 male_vie INFO Train Epoch: 1 [0%]
+ 2024-03-18 19:22:46,554 male_vie INFO [8.442647933959961, 2.208401918411255, 21.23743438720703, 158.06948852539062, 2.6139183044433594, 0, 1.8694475538262127e-05]
+ 2024-03-18 19:23:42,269 male_vie INFO Saving model and optimizer state at iteration 1 to ./logs\male_vie\G_0.pth
+ 2024-03-18 19:23:43,888 male_vie INFO Saving model and optimizer state at iteration 1 to ./logs\male_vie\D_0.pth
mel_processing.py ADDED
@@ -0,0 +1,112 @@
+ import math
+ import os
+ import random
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+ import torch.utils.data
+ import numpy as np
+ import librosa
+ import librosa.util as librosa_util
+ from librosa.util import normalize, pad_center, tiny
+ from scipy.signal import get_window
+ from scipy.io.wavfile import read
+ from librosa.filters import mel as librosa_mel_fn
+
+ MAX_WAV_VALUE = 32768.0
+
+
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+     """
+     PARAMS
+     ------
+     C: compression factor
+     """
+     return torch.log(torch.clamp(x, min=clip_val) * C)
+
+
+ def dynamic_range_decompression_torch(x, C=1):
+     """
+     PARAMS
+     ------
+     C: compression factor used to compress
+     """
+     return torch.exp(x) / C
+
+
+ def spectral_normalize_torch(magnitudes):
+     output = dynamic_range_compression_torch(magnitudes)
+     return output
+
+
+ def spectral_de_normalize_torch(magnitudes):
+     output = dynamic_range_decompression_torch(magnitudes)
+     return output
+
+
+ mel_basis = {}
+ hann_window = {}
+
+
+ def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
+     if torch.min(y) < -1.:
+         print('min value is ', torch.min(y))
+     if torch.max(y) > 1.:
+         print('max value is ', torch.max(y))
+
+     global hann_window
+     dtype_device = str(y.dtype) + '_' + str(y.device)
+     wnsize_dtype_device = str(win_size) + '_' + dtype_device
+     if wnsize_dtype_device not in hann_window:
+         hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+
+     y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
+     y = y.squeeze(1)
+
+     spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+                       center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
+
+     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+     return spec
+
+
+ def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
+     global mel_basis
+     dtype_device = str(spec.dtype) + '_' + str(spec.device)
+     fmax_dtype_device = str(fmax) + '_' + dtype_device
+     if fmax_dtype_device not in mel_basis:
+         # NOTE: this positional call requires librosa < 0.10; newer librosa makes the
+         # arguments keyword-only (librosa_mel_fn(sr=..., n_fft=..., n_mels=..., fmin=..., fmax=...)).
+         mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
+         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
+     spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+     spec = spectral_normalize_torch(spec)
+     return spec
+
+
+ def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
+     if torch.min(y) < -1.:
+         print('min value is ', torch.min(y))
+     if torch.max(y) > 1.:
+         print('max value is ', torch.max(y))
+
+     global mel_basis, hann_window
+     dtype_device = str(y.dtype) + '_' + str(y.device)
+     fmax_dtype_device = str(fmax) + '_' + dtype_device
+     wnsize_dtype_device = str(win_size) + '_' + dtype_device
+     if fmax_dtype_device not in mel_basis:
+         mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)  # keyword-only in librosa >= 0.10, see note above
+         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
+     if wnsize_dtype_device not in hann_window:
+         hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+
+     y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
+     y = y.squeeze(1)
+
+     spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+                       center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
+
+     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+
+     spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+     spec = spectral_normalize_torch(spec)
+
+     return spec
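For orientation, a minimal usage sketch of the helpers above, driven with the hyperparameters this upload trains with (16 kHz audio, filter_length 1024, hop_length 256, win_length 1024, 80 mel bins); the wav path is hypothetical:

    import torch
    from scipy.io.wavfile import read
    from mel_processing import spectrogram_torch, spec_to_mel_torch

    sr, wav = read("sample.wav")                               # hypothetical 16 kHz mono int16 file
    y = torch.FloatTensor(wav.astype("float32")) / 32768.0     # scale into [-1, 1] (MAX_WAV_VALUE)
    y = y.unsqueeze(0)                                         # add batch dim -> (1, T)
    spec = spectrogram_torch(y, 1024, 16000, 256, 1024, center=False)  # (1, 513, frames)
    mel = spec_to_mel_torch(spec, 1024, 80, 16000, 0.0, None)          # (1, 80, frames)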
models_mel_style.py ADDED
@@ -0,0 +1,991 @@
+ import copy
+ import math
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+ import os
+ import yaml
+ import commons
+ import modules
+ import attentions
+ import monotonic_align
+ import numpy as np
+ from mel_processing import mel_spectrogram_torch, spec_to_mel_torch, spectrogram_torch
+
+
+ from Attention import MultiHeadedAttention as BaseMultiHeadedAttention
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+ from commons import init_weights, get_padding
+ from transformers import AlbertConfig, AlbertModel
+ from collections import OrderedDict
+ from text import sequence_to_text
+ import utils
+
+
+ log_dir = "configs"
+ config_path = os.path.join(log_dir, "vie_bert.yml")
+ with open(config_path) as f:  # close the config file instead of leaking the handle
+     plbert_config = yaml.safe_load(f)
+
+
+ # hps = utils.get_hparams()
+
+
+ class StochasticDurationPredictor(nn.Module):
+     def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
+         super().__init__()
+         filter_channels = in_channels  # NOTE: this override is slated for removal in a future version.
+         self.in_channels = in_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.n_flows = n_flows
+         self.gin_channels = gin_channels
+
+         self.log_flow = modules.Log()
+         self.flows = nn.ModuleList()
+         self.flows.append(modules.ElementwiseAffine(2))
+         for i in range(n_flows):
+             self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+             self.flows.append(modules.Flip())
+
+         self.post_pre = nn.Conv1d(1, filter_channels, 1)
+         self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
+         self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+         self.post_flows = nn.ModuleList()
+         self.post_flows.append(modules.ElementwiseAffine(2))
+         for i in range(4):
+             self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+             self.post_flows.append(modules.Flip())
+
+         self.pre = nn.Conv1d(in_channels, filter_channels, 1)
+         self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
+         self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+         if gin_channels != 0:
+             self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
+
+     def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
+         x = torch.detach(x)
+         x = self.pre(x)
+         if g is not None:
+             g = torch.detach(g)
+             x = x + self.cond(g)
+         x = self.convs(x, x_mask)
+         x = self.proj(x) * x_mask
+
+         if not reverse:
+             flows = self.flows
+             assert w is not None
+
+             logdet_tot_q = 0
+             h_w = self.post_pre(w)
+             h_w = self.post_convs(h_w, x_mask)
+             h_w = self.post_proj(h_w) * x_mask
+             e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
+             z_q = e_q
+             for flow in self.post_flows:
+                 z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
+                 logdet_tot_q += logdet_q
+             z_u, z1 = torch.split(z_q, [1, 1], 1)
+             u = torch.sigmoid(z_u) * x_mask
+             z0 = (w - u) * x_mask
+             logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
+             logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1, 2]) - logdet_tot_q
+
+             logdet_tot = 0
+             z0, logdet = self.log_flow(z0, x_mask)
+             logdet_tot += logdet
+             z = torch.cat([z0, z1], 1)
+             for flow in flows:
+                 z, logdet = flow(z, x_mask, g=x, reverse=reverse)
+                 logdet_tot = logdet_tot + logdet
+             nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1, 2]) - logdet_tot
+             return nll + logq  # [b]
+         else:
+             flows = list(reversed(self.flows))
+             flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
+             z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
+             for flow in flows:
+                 z = flow(z, x_mask, g=x, reverse=reverse)
+             z0, z1 = torch.split(z, [1, 1], 1)
+             logw = z0
+             return logw
+
+
+ class DurationPredictor(nn.Module):
+     def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
+         super().__init__()
+
+         self.in_channels = in_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.gin_channels = gin_channels
+
+         self.drop = nn.Dropout(p_dropout)
+         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
+         self.norm_1 = modules.LayerNorm(filter_channels)
+         self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
+         self.norm_2 = modules.LayerNorm(filter_channels)
+         self.proj = nn.Conv1d(filter_channels, 1, 1)
+
+         if gin_channels != 0:
+             self.cond = nn.Conv1d(gin_channels, in_channels, 1)
+
+     def forward(self, x, x_mask, g=None):
+         x = torch.detach(x)
+         if g is not None:
+             g = torch.detach(g)
+             x = x + self.cond(g)
+         x = self.conv_1(x * x_mask)
+         x = torch.relu(x)
+         x = self.norm_1(x)
+         x = self.drop(x)
+         x = self.conv_2(x * x_mask)
+         x = torch.relu(x)
+         x = self.norm_2(x)
+         x = self.drop(x)
+         x = self.proj(x * x_mask)
+         return x * x_mask
+
+
+ def length_to_mask(lengths):
+     # print(lengths.max(), 'final')
+     mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
+     mask = torch.gt(mask+1, lengths.unsqueeze(1))
+     return mask
+
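# A quick illustration of length_to_mask semantics: True marks padded positions,
# which is why TextEncoder below inverts the mask before handing it to BERT.
#
#     lengths = torch.tensor([3, 5])
#     length_to_mask(lengths)
#     # tensor([[False, False, False,  True,  True],
#     #         [False, False, False, False, False]])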
+ class TextEncoder(nn.Module):
+     def __init__(self,
+                  n_vocab,
+                  out_channels,
+                  hidden_channels,
+                  filter_channels,
+                  n_heads,
+                  n_layers,
+                  kernel_size,
+                  p_dropout):
+         super().__init__()
+
+         self.out_channels = out_channels
+         # self.hidden_channels = hidden_channels
+         # self.p_dropout = p_dropout
+
+         # self.emb = nn.Embedding(n_vocab, hidden_channels)
+         # nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
+
+         self.encoder = attentions.Encoder(
+             hidden_channels,
+             filter_channels,
+             n_heads,
+             n_layers,
+             kernel_size,
+             p_dropout)
+
+         albert_base_configuration = AlbertConfig(**plbert_config['model_params'])
+         bert = AlbertModel(albert_base_configuration)
+
+         # checkpoint = torch.load(log_dir + "/step_1000000" + ".t7", map_location='cpu')
+         # state_dict = checkpoint['net']
+
+         # map_location='cpu' keeps the checkpoint on CPU, matching the no-gpu-inference path below.
+         checkpoint = torch.load(log_dir + "/bert_" + "5" + ".pt", map_location='cpu')
+         state_dict = checkpoint
+         new_state_dict = OrderedDict()
+         for k, v in state_dict.items():
+             name = k[7:]  # remove `module.`
+             if name.startswith('encoder.'):
+                 name = name[8:]  # remove `encoder.`
+             new_state_dict[name] = v
+         # print(new_state_dict)
+         bert.load_state_dict(new_state_dict, strict=False)
+
+         # self.bert = bert.to('cuda')
+         self.bert = bert  # no-gpu-inference
+
+         # print(self.bert.pooler.weight.requires_grad)
+         # print(self.bert.pooler.bias.requires_grad)
+         # for param in self.bert.pooler.weight.parameters():
+         #     param.requires_grad = True  # or True
+
+         # for param in self.bert.pooler.bias.parameters():
+         #     param.requires_grad = True  # or True
+
+         self.linear = nn.Linear(plbert_config['model_params']['hidden_size'], hidden_channels)
+         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+     def forward(self, x, x_lengths):
+         # print(x, x_lengths, 'test2')
+
+         attention_mask = length_to_mask(torch.Tensor(x_lengths))
+         # print(len(x[0]), len(attention_mask[0]), 'test3')
+         # print((~attention_mask).int())
+         # print(self.bert(x, attention_mask=(~attention_mask).int()))
+         x = self.bert(x, attention_mask=(~attention_mask).int()).last_hidden_state  # [b, t, h1]
+
+         x = self.linear(x)
+         # x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
+         x = torch.transpose(x, 1, -1)  # [b, h, t]
+
+         # x_mask = torch.gt(torch.arange(torch.Tensor(x_lengths).max()).unsqueeze(0).expand(torch.Tensor(x_lengths).shape[0], -1).type_as(torch.Tensor(x_lengths))+1, torch.Tensor(x_lengths).unsqueeze(1)).int()
+         x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+         # print(x_mask)
+
+         x = self.encoder(x * x_mask, x_mask)
+         stats = self.proj(x) * x_mask
+
+         m, logs = torch.split(stats, self.out_channels, dim=1)
+         return x, m, logs, x_mask
+
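# TextEncoder.forward expects a padded batch of phoneme IDs plus their true lengths;
# a hypothetical call (IDs must stay below the PL-BERT vocabulary size):
#
#     x = torch.randint(0, 100, (2, 50))             # (B, T_text) integer token IDs
#     x_lengths = torch.tensor([50, 42])
#     h, m_p, logs_p, x_mask = enc_p(x, x_lengths)   # h: (B, hidden_channels, T_text)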
+ class ResidualCouplingBlock(nn.Module):
+     def __init__(self,
+                  channels,
+                  hidden_channels,
+                  kernel_size,
+                  dilation_rate,
+                  n_layers,
+                  n_flows=4,
+                  gin_channels=0):
+         super().__init__()
+         self.channels = channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.n_flows = n_flows
+         self.gin_channels = gin_channels
+
+         self.flows = nn.ModuleList()
+         for i in range(n_flows):
+             self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
+             self.flows.append(modules.Flip())
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         if not reverse:
+             for flow in self.flows:
+                 x, _ = flow(x, x_mask, g=g, reverse=reverse)
+         else:
+             for flow in reversed(self.flows):
+                 x = flow(x, x_mask, g=g, reverse=reverse)
+         return x
+
+
+ class PosteriorEncoder(nn.Module):
+     def __init__(self,
+                  in_channels,
+                  out_channels,
+                  hidden_channels,
+                  kernel_size,
+                  dilation_rate,
+                  n_layers,
+                  gin_channels=0):
+         super().__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.gin_channels = gin_channels
+
+         self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+         self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
+         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+     def forward(self, x, x_lengths, g=None):
+         x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+         x = self.pre(x) * x_mask
+         x = self.enc(x, x_mask, g=g)
+         stats = self.proj(x) * x_mask
+         m, logs = torch.split(stats, self.out_channels, dim=1)
+         z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+         return z, m, logs, x_mask
+
+
+ class Generator(torch.nn.Module):
+     def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
+         super(Generator, self).__init__()
+         self.num_kernels = len(resblock_kernel_sizes)
+         self.num_upsamples = len(upsample_rates)
+         self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
+         resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
+
+         self.ups = nn.ModuleList()
+         for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+             self.ups.append(weight_norm(
+                 ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
+                                 k, u, padding=(k-u)//2)))
+
+         self.resblocks = nn.ModuleList()
+         for i in range(len(self.ups)):
+             ch = upsample_initial_channel//(2**(i+1))
+             for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
+                 self.resblocks.append(resblock(ch, k, d))
+
+         self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+         self.ups.apply(init_weights)
+
+         if gin_channels != 0:
+             self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+     def forward(self, x, g=None):
+         x = self.conv_pre(x)
+         if g is not None:
+             x = x + self.cond(g)
+
+         for i in range(self.num_upsamples):
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             x = self.ups[i](x)
+             xs = None
+             for j in range(self.num_kernels):
+                 if xs is None:
+                     xs = self.resblocks[i*self.num_kernels+j](x)
+                 else:
+                     xs += self.resblocks[i*self.num_kernels+j](x)
+             x = xs / self.num_kernels
+         x = F.leaky_relu(x)
+         x = self.conv_post(x)
+         x = torch.tanh(x)
+
+         return x
+
+     def remove_weight_norm(self):
+         print('Removing weight norm...')
+         for l in self.ups:
+             remove_weight_norm(l)
+         for l in self.resblocks:
+             l.remove_weight_norm()
+
+
+ class DiscriminatorP(torch.nn.Module):
+     def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+         super(DiscriminatorP, self).__init__()
+         self.period = period
+         self.use_spectral_norm = use_spectral_norm
+         norm_f = weight_norm if not use_spectral_norm else spectral_norm
+         self.convs = nn.ModuleList([
+             norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+             norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+             norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+             norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+             norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
+         ])
+         self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+     def forward(self, x):
+         fmap = []
+
+         # 1d to 2d
+         b, c, t = x.shape
+         if t % self.period != 0:  # pad first
+             n_pad = self.period - (t % self.period)
+             x = F.pad(x, (0, n_pad), "reflect")
+             t = t + n_pad
+         x = x.view(b, c, t // self.period, self.period)
+
+         for l in self.convs:
+             x = l(x)
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             fmap.append(x)
+         x = self.conv_post(x)
+         fmap.append(x)
+         x = torch.flatten(x, 1, -1)
+
+         return x, fmap
+
+
+ class DiscriminatorS(torch.nn.Module):
+     def __init__(self, use_spectral_norm=False):
+         super(DiscriminatorS, self).__init__()
+         norm_f = weight_norm if not use_spectral_norm else spectral_norm
+         self.convs = nn.ModuleList([
+             norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+             norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+             norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+             norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+             norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+             norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+         ])
+         self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+     def forward(self, x):
+         fmap = []
+
+         for l in self.convs:
+             x = l(x)
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             fmap.append(x)
+         x = self.conv_post(x)
+         fmap.append(x)
+         x = torch.flatten(x, 1, -1)
+
+         return x, fmap
+
+
+ class MultiPeriodDiscriminator(torch.nn.Module):
+     def __init__(self, use_spectral_norm=False):
+         super(MultiPeriodDiscriminator, self).__init__()
+         periods = [2, 3, 5, 7, 11]
+
+         discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+         discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
+
+         self.discriminators = nn.ModuleList(discs)
+
+     def forward(self, y, y_hat):
+         y_d_rs = []
+         y_d_gs = []
+         fmap_rs = []
+         fmap_gs = []
+         for i, d in enumerate(self.discriminators):
+             y_d_r, fmap_r = d(y)
+             y_d_g, fmap_g = d(y_hat)
+             y_d_rs.append(y_d_r)
+             y_d_gs.append(y_d_g)
+             fmap_rs.append(fmap_r)
+             fmap_gs.append(fmap_g)
+
+         return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
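# Each DiscriminatorP folds the waveform into (B, C, T/period, period) so its 2-D
# convolutions see period-aligned samples, e.g. (illustrative shapes):
#
#     d = DiscriminatorP(period=3)
#     score, fmaps = d(torch.randn(4, 1, 8192))   # input is reflect-padded to a multiple of 3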
+ class SynthesizerTrn(nn.Module):
+     """
+     Synthesizer for Training
+     """
+
+     def __init__(self,
+                  n_vocab,
+                  spec_channels,
+                  segment_size,
+                  inter_channels,
+                  hidden_channels,
+                  filter_channels,
+                  n_heads,
+                  n_layers,
+                  kernel_size,
+                  p_dropout,
+                  resblock,
+                  resblock_kernel_sizes,
+                  resblock_dilation_sizes,
+                  upsample_rates,
+                  upsample_initial_channel,
+                  upsample_kernel_sizes,
+                  n_speakers=0,
+                  gin_channels=0,
+                  use_sdp=True,
+                  **kwargs):
+
+         super().__init__()
+         self.n_vocab = n_vocab
+         self.spec_channels = spec_channels
+         self.inter_channels = inter_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.resblock = resblock
+         self.resblock_kernel_sizes = resblock_kernel_sizes
+         self.resblock_dilation_sizes = resblock_dilation_sizes
+         self.upsample_rates = upsample_rates
+         self.upsample_initial_channel = upsample_initial_channel
+         self.upsample_kernel_sizes = upsample_kernel_sizes
+         self.segment_size = segment_size
+         self.n_speakers = n_speakers
+         self.gin_channels = gin_channels
+
+         self.use_sdp = use_sdp
+
+         self.enc_p = TextEncoder(n_vocab,
+                                  inter_channels,
+                                  hidden_channels,
+                                  filter_channels,
+                                  n_heads,
+                                  n_layers,
+                                  kernel_size,
+                                  p_dropout)
+         self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
+         self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
+         self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
+         self.style_encoder = StyleEmbedding()
+         if use_sdp:
+             self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
+         else:
+             self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
+
+         if n_speakers > 1:
+             self.emb_g = nn.Embedding(n_speakers, gin_channels)
+
+     def forward(self, x, x_lengths, mel, y, y_lengths, sid=None):
+         '''
+         set g = None for posterior enc, sdp(dp), vocoder except flow
+         '''
+         x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
+
+         if self.n_speakers > 0:
+             g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
+         else:
+             g = None
+         # * g: (8,256,1)
+         # z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+         z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=None)
+         # * y_mask: (8,1,262)
+         # z_p = self.flow(z, y_mask, g=g)
+         # * Zero-shot: condition the flow on a style vector extracted from the reference mel
+         #   instead of a learned speaker embedding.
+         style_vector = self.style_encoder(mel.transpose(1, 2), torch.tensor(np.full((mel.shape[0]), mel.shape[2])))
+         z_p = self.flow(z, y_mask, g=style_vector.unsqueeze(-1))
+
+         with torch.no_grad():
+             # negative cross-entropy
+             s_p_sq_r = torch.exp(-2 * logs_p)  # [b, d, t]
+             neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True)  # [b, 1, t_s]
+             neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r)  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
+             neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r))  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
+             neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True)  # [b, 1, t_s]
+             neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
+
+             attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
+             attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
+
+         w = attn.sum(2)
+         if self.use_sdp:
+             # l_length = self.dp(x, x_mask, w, g=g)
+             l_length = self.dp(x, x_mask, w, g=None)
+             l_length = l_length / torch.sum(x_mask)
+         else:
+             logw_ = torch.log(w + 1e-6) * x_mask
+             # logw = self.dp(x, x_mask, g=g)
+             logw = self.dp(x, x_mask, g=None)
+             l_length = torch.sum((logw - logw_)**2, [1, 2]) / torch.sum(x_mask)  # for averaging
+
+         # expand prior
+         m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
+         logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
+
+         z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
+         # o = self.dec(z_slice, g=g)
+         o = self.dec(z_slice, g=None)
+         return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+     # def forward(self, mel_src, mel_tgt, y, y_lengths, y_ref, y_lengths_ref, sid=None):
+
+     #     style_vector_src = self.style_encoder(mel_src.transpose(1,2), torch.tensor(np.full((mel_src.shape[0]), mel_src.shape[2])))
+     #     style_vector_ref = self.style_encoder(mel_tgt.transpose(1,2), torch.tensor(np.full((mel_tgt.shape[0]), mel_tgt.shape[2])))
+     #     ## SRC
+     #     z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=None)
+     #     z_p = self.flow(z, y_mask, g=style_vector_src.unsqueeze(-1))
+     #     z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
+
+     #     ## REF
+     #     z_ref, z_q_ref, logs_q_ref, y_mask_ref = self.enc_q(y_ref, y_lengths_ref, g=None)
+     #     z_p_ref = self.flow(z_ref, y_mask_ref, g=style_vector_ref.unsqueeze(-1))
+     #     z_slice_ref, ids_slice_ref = commons.rand_slice_segments(z_ref, y_lengths_ref, self.segment_size)
+
+     #     o = self.dec(z_slice, g=None)
+     #     o_ref = self.dec(z_slice_ref, g=None)
+
+     #     ## Style reconstruction
+     #     z_vc = self.flow(z_p, y_mask, g=style_vector_ref.unsqueeze(-1), reverse=True)
+     #     o_hat_vc = self.dec(z_vc * y_mask, g=None)
+     #     o_hat_vc_mel = mel_spectrogram_torch(
+     #         o_hat_vc.squeeze(1),
+     #         hps.data.filter_length,
+     #         hps.data.n_mel_channels,
+     #         hps.data.sampling_rate,
+     #         hps.data.hop_length,
+     #         hps.data.win_length,
+     #         hps.data.mel_fmin,
+     #         hps.data.mel_fmax
+     #     )
+     #     # spec_vc = spectrogram_torch(y_hat_vc, hps.data.filter_length,
+     #     #     hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length,
+     #     #     center=False)
+
+     #     # spec_vc = torch.squeeze(spec_vc, 0)
+
+     #     # mel_vc = spec_to_mel_torch(
+     #     #     spec_vc,
+     #     #     hps.data.filter_length,
+     #     #     hps.data.n_mel_channels,
+     #     #     hps.data.sampling_rate,
+     #     #     hps.data.mel_fmin,
+     #     #     hps.data.mel_fmax)
+
+     #     style_vector_vc = self.style_encoder(o_hat_vc_mel.transpose(1,2), torch.tensor(np.full((o_hat_vc_mel.shape[0]), o_hat_vc_mel.shape[2])))
+
+     #     return o, o_ref, ids_slice, ids_slice_ref, y_mask, y_mask_ref, (z, z_ref, z_p, z_p_ref, logs_q, logs_q_ref), style_vector_vc, style_vector_ref
+
+     def infer(self, x, x_lengths, mel, mel_lengths=None, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
+         x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
+         # print(m_p.transpose(1,2).shape, m_p.transpose(1,2))
+         if self.n_speakers > 0:
+             g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
+         else:
+             g = None
+
+         if self.use_sdp:
+             # logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
+             logw = self.dp(x, x_mask, g=None, reverse=True, noise_scale=noise_scale_w)
+         else:
+             # logw = self.dp(x, x_mask, g=g)
+             logw = self.dp(x, x_mask, g=None)
+         w = torch.exp(logw) * x_mask * length_scale
+         w_ceil = torch.ceil(w)
+
+         # print(w_ceil)
+         y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
+         y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
+
+         attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
+         attn = commons.generate_path(w_ceil, attn_mask)
+
+         m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
+         # print(m_p.transpose(1,2).shape, m_p.transpose(1,2))
+         logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
+
+         z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
+         # print(z_p.transpose(1,2).shape, z_p.transpose(1,2))
+         # * used for mel style encoder
+         if mel_lengths is not None:
+             style_mask = torch.unsqueeze(commons.sequence_mask(mel_lengths, mel.size(2)), 1).to(x.dtype)
+             style_vector = self.style_encoder(mel.transpose(1, 2), torch.tensor(np.full((mel.shape[0]), mel.shape[2])))
+         else:
+             style_vector = self.style_encoder(mel.transpose(1, 2), torch.tensor(np.full((mel.shape[0]), mel.shape[2])))
+         # z = self.flow(z_p, y_mask, g=g, reverse=True)
+
+         z = self.flow(z_p, y_mask, g=style_vector.unsqueeze(-1), reverse=True)
+         # o = self.dec((z * y_mask)[:,:,:max_len], g=g)
+         o = self.dec((z * y_mask)[:, :, :max_len], g=None)
+         return o, attn, y_mask, (z, z_p, m_p, logs_p)
+
+     # def forward(self, spec_source_pattern, spec_lengths_source, mel_source, mel_ref):
+     #     style_vector_src = self.style_encoder(mel_source.transpose(1,2), torch.tensor(np.full((mel_source.shape[0]), mel_source.shape[2])))
+     #     style_vector_ref = self.style_encoder(mel_ref.transpose(1,2), torch.tensor(np.full((mel_ref.shape[0]), mel_ref.shape[2])))
+     #     z, m_q, logs_q, y_mask = self.enc_q(spec_source_pattern, spec_lengths_source, g=None)
+     #     z_p = self.flow(z, y_mask, g=style_vector_src.unsqueeze(-1))
+     #     z_hat = self.flow(z_p, y_mask, g=style_vector_ref.unsqueeze(-1), reverse=True)
+     #     o_hat = self.dec(z_hat * y_mask, g=None)
+     #     z_slice, ids_slice = commons.rand_slice_segments(z, spec_lengths_source, self.segment_size)
+     #     return o_hat, y_mask, (z, z_p, z_hat), style_vector_src, style_vector_ref, ids_slice
+
+     def voice_conversion(self, spec_source_pattern, spec_lengths_source, mel_source, mel_ref):
+         style_vector_src = self.style_encoder(mel_source.transpose(1, 2), torch.tensor(np.full((mel_source.shape[0]), mel_source.shape[2])))
+         style_vector_ref = self.style_encoder(mel_ref.transpose(1, 2), torch.tensor(np.full((mel_ref.shape[0]), mel_ref.shape[2])))
+         z, m_q, logs_q, y_mask = self.enc_q(spec_source_pattern, spec_lengths_source, g=None)
+         z_p = self.flow(z, y_mask, g=style_vector_src.unsqueeze(-1))
+         z_hat = self.flow(z_p, y_mask, g=style_vector_ref.unsqueeze(-1), reverse=True)
+         o_hat = self.dec(z_hat * y_mask, g=None)
+         return o_hat, y_mask, (z, z_p, z_hat)
+
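# voice_conversion maps the source utterance into the flow's prior space under the
# source style, then inverts the flow under the reference style. A minimal sketch,
# assuming `net` is a constructed/loaded SynthesizerTrn and shapes follow the config
# above (513 linear-spectrogram bins, 80 mel bins):
#
#     spec_src = torch.randn(1, 513, 200)    # source linear spectrogram (B, bins, frames)
#     lengths = torch.tensor([200])
#     mel_src = torch.randn(1, 80, 200)      # mel of the source speaker
#     mel_ref = torch.randn(1, 80, 350)      # mel of the target speaker
#     with torch.no_grad():
#         wav, y_mask, _ = net.voice_conversion(spec_src, lengths, mel_src, mel_ref)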
+ class MelStyleEncoder(nn.Module):
+     ''' MelStyleEncoder '''
+     def __init__(self, n_mel_channels=80,
+                  style_hidden=128,
+                  style_vector_dim=256,
+                  style_kernel_size=5,
+                  style_head=2,
+                  dropout=0.1):
+         super(MelStyleEncoder, self).__init__()
+         self.in_dim = n_mel_channels
+         self.hidden_dim = style_hidden
+         self.out_dim = style_vector_dim
+         self.kernel_size = style_kernel_size
+         self.n_head = style_head
+         self.dropout = dropout
+
+         self.spectral = nn.Sequential(
+             modules.LinearNorm(self.in_dim, self.hidden_dim),
+             modules.Mish(),
+             nn.Dropout(self.dropout),
+             modules.LinearNorm(self.hidden_dim, self.hidden_dim),
+             modules.Mish(),
+             nn.Dropout(self.dropout)
+         )
+
+         self.temporal = nn.Sequential(
+             modules.Conv1dGLU(self.hidden_dim, self.hidden_dim, self.kernel_size, self.dropout),
+             modules.Conv1dGLU(self.hidden_dim, self.hidden_dim, self.kernel_size, self.dropout),
+         )
+
+         self.slf_attn = modules.MultiHeadAttention(self.n_head, self.hidden_dim,
+                                                    self.hidden_dim//self.n_head, self.hidden_dim//self.n_head, self.dropout)
+
+         self.fc = modules.LinearNorm(self.hidden_dim, self.out_dim)
+
+     def temporal_avg_pool(self, x, mask=None):
+         if mask is None:
+             out = torch.mean(x, dim=1)
+         else:
+             len_ = (~mask).sum(dim=1).unsqueeze(1)
+             x = x.masked_fill(mask.unsqueeze(-1), 0)
+             x = x.sum(dim=1)
+             out = torch.div(x, len_)
+         return out
+
+     def forward(self, x, mask=None):
+         max_len = x.shape[1]
+         slf_attn_mask = mask.unsqueeze(1).expand(-1, max_len, -1) if mask is not None else None
+
+         # spectral
+         x = self.spectral(x)
+         # temporal
+         x = x.transpose(1, 2)
+         x = self.temporal(x)
+         x = x.transpose(1, 2)
+         # self-attention
+         # print(x.shape, mask.shape)
+         if mask is not None:
+             x = x.masked_fill(mask.unsqueeze(-1), 0)
+         x, _ = self.slf_attn(x, mask=slf_attn_mask)
+         # fc
+         x = self.fc(x)
+         # temporal average pooling
+         w = self.temporal_avg_pool(x, mask=mask)
+
+         return w
+
+
+ class StyleEmbedding(torch.nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.gst = StyleEncoder()
+
+     def forward(self,
+                 batch_of_spectrograms,
+                 batch_of_spectrogram_lengths,
+                 return_all_outs=False,
+                 return_only_refs=False):
+         minimum_sequence_length = 812
+         specs = list()
+
+         for index, spec_length in enumerate(batch_of_spectrogram_lengths):
+             spec = batch_of_spectrograms[index][:spec_length]
+             # double the length at least once, then check
+             spec = spec.repeat((2, 1))
+             current_spec_length = len(spec)
+             while current_spec_length < minimum_sequence_length:
+                 # make it longer
+                 spec = spec.repeat((2, 1))
+                 current_spec_length = len(spec)
+             specs.append(spec[:812])
+
+         spec_batch = torch.stack(specs, dim=0)
+         return self.gst(speech=spec_batch,
+                         return_all_outs=return_all_outs,
+                         return_only_ref=return_only_refs)
+
+
+ class StyleEncoder(torch.nn.Module):
+     def __init__(
+             self,
+             idim: int = 80,
+             gst_tokens: int = 2000,
+             gst_token_dim: int = 256,
+             gst_heads: int = 8,
+             conv_layers: int = 8,
+             conv_chans_list=(32, 32, 64, 64, 128, 128, 256, 256),
+             conv_kernel_size: int = 3,
+             conv_stride: int = 2,
+             gst_layers: int = 2,
+             gst_units: int = 256,
+     ):
+         """Initialize global style encoder module."""
+         super(StyleEncoder, self).__init__()
+
+         self.num_tokens = gst_tokens
+         self.ref_enc = ReferenceEncoder(idim=idim,
+                                         conv_layers=conv_layers,
+                                         conv_chans_list=conv_chans_list,
+                                         conv_kernel_size=conv_kernel_size,
+                                         conv_stride=conv_stride,
+                                         gst_layers=gst_layers,
+                                         gst_units=gst_units, )
+         self.stl = StyleTokenLayer(ref_embed_dim=gst_units,
+                                    gst_tokens=gst_tokens,
+                                    gst_token_dim=gst_token_dim,
+                                    gst_heads=gst_heads, )
+
+         self.ref_mel = MelStyleEncoder(n_mel_channels=idim)
+
+     def forward(self, speech, return_all_outs=False, return_only_ref=False):
+         ref_mels = self.ref_mel(speech)
+         ref_embs = self.ref_enc(speech)
+         if return_only_ref and not return_all_outs:
+             return ref_embs
+         style_embs = self.stl(ref_embs)
+
+         if return_all_outs:
+             if return_only_ref:
+                 return ref_embs, [ref_embs] + [style_embs]
+             return style_embs, [ref_embs] + [style_embs]
+
+         # print(style_embs.shape, ref_mels.shape, ref_embs.shape)
+         return style_embs + ref_mels
+
+     def calculate_ada4_regularization_loss(self):
+         losses = list()
+         for emb1_index in range(self.num_tokens):
+             for emb2_index in range(emb1_index + 1, self.num_tokens):
+                 if emb1_index != emb2_index:
+                     losses.append(torch.nn.functional.cosine_similarity(self.stl.gst_embs[emb1_index],
+                                                                         self.stl.gst_embs[emb2_index], dim=0))
+         return sum(losses)
+
+
+ class ReferenceEncoder(torch.nn.Module):
+
+     def __init__(
+             self,
+             idim=80,
+             conv_layers: int = 6,
+             conv_chans_list=(32, 32, 64, 64, 128, 128),
+             conv_kernel_size: int = 3,
+             conv_stride: int = 2,
+             gst_layers: int = 1,
+             gst_units: int = 128,
+     ):
+         """Initialize reference encoder module."""
+         super(ReferenceEncoder, self).__init__()
+
+         # check hyperparameters are valid
+         assert conv_kernel_size % 2 == 1, "kernel size must be odd."
+         assert len(conv_chans_list) == conv_layers, "the number of conv layers and length of channels list must be the same."
+
+         convs = []
+         padding = (conv_kernel_size - 1) // 2
+         for i in range(conv_layers):
+             conv_in_chans = 1 if i == 0 else conv_chans_list[i - 1]
+             conv_out_chans = conv_chans_list[i]
+             convs += [torch.nn.Conv2d(conv_in_chans,
+                                       conv_out_chans,
+                                       kernel_size=conv_kernel_size,
+                                       stride=conv_stride,
+                                       padding=padding,
+                                       # Do not use bias due to the following batch norm
+                                       bias=False, ),
+                       torch.nn.BatchNorm2d(conv_out_chans),
+                       torch.nn.ReLU(inplace=True), ]
+         self.convs = torch.nn.Sequential(*convs)
+
+         self.conv_layers = conv_layers
+         self.kernel_size = conv_kernel_size
+         self.stride = conv_stride
+         self.padding = padding
+
+         # get the number of GRU input units
+         gst_in_units = idim
+         for i in range(conv_layers):
+             gst_in_units = (gst_in_units - conv_kernel_size + 2 * padding) // conv_stride + 1
+         gst_in_units *= conv_out_chans
+         self.gst = torch.nn.GRU(gst_in_units, gst_units, gst_layers, batch_first=True)
+
+     def forward(self, speech):
+         """Calculate forward propagation.
+         Args:
+             speech (Tensor): Batch of padded target features (B, Lmax, idim).
+         Returns:
+             Tensor: Reference embedding (B, gst_units)
+         """
+         batch_size = speech.size(0)
+         xs = speech.unsqueeze(1)  # (B, 1, Lmax, idim)
+         hs = self.convs(xs).transpose(1, 2)  # (B, Lmax', conv_out_chans, idim')
+         time_length = hs.size(1)
+         hs = hs.contiguous().view(batch_size, time_length, -1)  # (B, Lmax', gst_units)
+         self.gst.flatten_parameters()
+         # pack_padded_sequence(hs, speech_lens, enforce_sorted=False, batch_first=True)
+         _, ref_embs = self.gst(hs)  # (gst_layers, batch_size, gst_units)
+         ref_embs = ref_embs[-1]  # (batch_size, gst_units)
+
+         return ref_embs
+
+
+ class StyleTokenLayer(torch.nn.Module):
+     """Style token layer module.
+     This module is the style token layer introduced in `Style Tokens: Unsupervised Style
+     Modeling, Control and Transfer in End-to-End Speech Synthesis`.
+     .. _`Style Tokens: Unsupervised Style Modeling, Control and Transfer in End-to-End
+     Speech Synthesis`: https://arxiv.org/abs/1803.09017
+     Args:
+         ref_embed_dim (int, optional): Dimension of the input reference embedding.
+         gst_tokens (int, optional): The number of GST embeddings.
+         gst_token_dim (int, optional): Dimension of each GST embedding.
+         gst_heads (int, optional): The number of heads in GST multihead attention.
+         dropout_rate (float, optional): Dropout rate in multi-head attention.
+     """
+
+     def __init__(
+             self,
+             ref_embed_dim: int = 128,
+             gst_tokens: int = 10,
+             gst_token_dim: int = 128,
+             gst_heads: int = 4,
+             dropout_rate: float = 0.0,
+     ):
+         """Initialize style token layer module."""
+         super(StyleTokenLayer, self).__init__()
+
+         gst_embs = torch.randn(gst_tokens, gst_token_dim // gst_heads)
+         self.register_parameter("gst_embs", torch.nn.Parameter(gst_embs))
+         self.mha = MultiHeadedAttention(q_dim=ref_embed_dim,
+                                         k_dim=gst_token_dim // gst_heads,
+                                         v_dim=gst_token_dim // gst_heads,
+                                         n_head=gst_heads,
+                                         n_feat=gst_token_dim,
+                                         dropout_rate=dropout_rate, )
+
+     def forward(self, ref_embs):
+         """Calculate forward propagation.
+         Args:
+             ref_embs (Tensor): Reference embeddings (B, ref_embed_dim).
+         Returns:
+             Tensor: Style token embeddings (B, gst_token_dim).
+         """
+         batch_size = ref_embs.size(0)
+         # (num_tokens, token_dim) -> (batch_size, num_tokens, token_dim)
+         gst_embs = torch.tanh(self.gst_embs).unsqueeze(0).expand(batch_size, -1, -1)
+         # NOTE(kan-bayashi): Should we apply Tanh?
+         ref_embs = ref_embs.unsqueeze(1)  # (batch_size, 1, ref_embed_dim)
+         style_embs = self.mha(ref_embs, gst_embs, gst_embs, None)
+
+         return style_embs.squeeze(1)
+
+
+ class MultiHeadedAttention(BaseMultiHeadedAttention):
+     """Multi head attention module with different input dimension."""
+
+     def __init__(self, q_dim, k_dim, v_dim, n_head, n_feat, dropout_rate=0.0):
+         """Initialize multi head attention module."""
+         # NOTE(kan-bayashi): Do not use super().__init__() here since we want to
+         # overwrite BaseMultiHeadedAttention.__init__() method.
+         torch.nn.Module.__init__(self)
+         assert n_feat % n_head == 0
+         # We assume d_v always equals d_k
+         self.d_k = n_feat // n_head
+         self.h = n_head
+         self.linear_q = torch.nn.Linear(q_dim, n_feat)
+         self.linear_k = torch.nn.Linear(k_dim, n_feat)
+         self.linear_v = torch.nn.Linear(v_dim, n_feat)
+         self.linear_out = torch.nn.Linear(n_feat, n_feat)
+         self.attn = None
+         self.dropout = torch.nn.Dropout(p=dropout_rate)
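Taken together, the style path above turns an arbitrary-length reference mel into a single 256-dimensional conditioning vector (StyleEmbedding tiles the reference to at least 812 frames and truncates to exactly 812 before the GST encoder). A minimal sketch of the call pattern the model code uses, with illustrative shapes:

    se = StyleEmbedding()
    mel = torch.randn(2, 80, 400)                                  # (B, n_mels, frames) as produced by the data pipeline
    lengths = torch.tensor(np.full((mel.shape[0],), mel.shape[2]))
    style = se(mel.transpose(1, 2), lengths)                       # -> (2, 256) style vectors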
modules.py ADDED
@@ -0,0 +1,543 @@
1
+ import copy
2
+ import math
3
+ import numpy as np
4
+ import scipy
5
+ import torch
6
+ from torch import nn
7
+ from torch.nn import functional as F
8
+
9
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
10
+ from torch.nn.utils import weight_norm, remove_weight_norm
11
+
12
+ import commons
13
+ from commons import init_weights, get_padding
14
+ from transforms import piecewise_rational_quadratic_transform
15
+
16
+
17
+ LRELU_SLOPE = 0.1
18
+
19
+
20
+ class LayerNorm(nn.Module):
21
+ def __init__(self, channels, eps=1e-5):
22
+ super().__init__()
23
+ self.channels = channels
24
+ self.eps = eps
25
+
26
+ self.gamma = nn.Parameter(torch.ones(channels))
27
+ self.beta = nn.Parameter(torch.zeros(channels))
28
+
29
+ def forward(self, x):
30
+ x = x.transpose(1, -1)
31
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
32
+ return x.transpose(1, -1)
33
+
34
+
35
+ class ConvReluNorm(nn.Module):
36
+ def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
37
+ super().__init__()
38
+ self.in_channels = in_channels
39
+ self.hidden_channels = hidden_channels
40
+ self.out_channels = out_channels
41
+ self.kernel_size = kernel_size
42
+ self.n_layers = n_layers
43
+ self.p_dropout = p_dropout
44
+ assert n_layers > 1, "Number of layers should be larger than 0."
45
+
46
+ self.conv_layers = nn.ModuleList()
47
+ self.norm_layers = nn.ModuleList()
48
+ self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
49
+ self.norm_layers.append(LayerNorm(hidden_channels))
50
+ self.relu_drop = nn.Sequential(
51
+ nn.ReLU(),
52
+ nn.Dropout(p_dropout))
53
+ for _ in range(n_layers-1):
54
+ self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
55
+ self.norm_layers.append(LayerNorm(hidden_channels))
56
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
57
+ self.proj.weight.data.zero_()
58
+ self.proj.bias.data.zero_()
59
+
60
+ def forward(self, x, x_mask):
61
+ x_org = x
62
+ for i in range(self.n_layers):
63
+ x = self.conv_layers[i](x * x_mask)
64
+ x = self.norm_layers[i](x)
65
+ x = self.relu_drop(x)
66
+ x = x_org + self.proj(x)
67
+ return x * x_mask
68
+
69
+
70
+ class DDSConv(nn.Module):
71
+ """
72
+ Dialted and Depth-Separable Convolution
73
+ """
74
+ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
75
+ super().__init__()
76
+ self.channels = channels
77
+ self.kernel_size = kernel_size
78
+ self.n_layers = n_layers
79
+ self.p_dropout = p_dropout
80
+
81
+ self.drop = nn.Dropout(p_dropout)
82
+ self.convs_sep = nn.ModuleList()
83
+ self.convs_1x1 = nn.ModuleList()
84
+ self.norms_1 = nn.ModuleList()
85
+ self.norms_2 = nn.ModuleList()
86
+ for i in range(n_layers):
87
+ dilation = kernel_size ** i
88
+ padding = (kernel_size * dilation - dilation) // 2
89
+ self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
90
+ groups=channels, dilation=dilation, padding=padding
91
+ ))
92
+ self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
93
+ self.norms_1.append(LayerNorm(channels))
94
+ self.norms_2.append(LayerNorm(channels))
95
+
96
+ def forward(self, x, x_mask, g=None):
97
+ if g is not None:
98
+ x = x + g
99
+ for i in range(self.n_layers):
100
+ y = self.convs_sep[i](x * x_mask)
101
+ y = self.norms_1[i](y)
102
+ y = F.gelu(y)
103
+ y = self.convs_1x1[i](y)
104
+ y = self.norms_2[i](y)
105
+ y = F.gelu(y)
106
+ y = self.drop(y)
107
+ x = x + y
108
+ return x * x_mask
109
+
+
+ class WN(torch.nn.Module):
+     def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
+         super(WN, self).__init__()
+         assert kernel_size % 2 == 1
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.gin_channels = gin_channels
+         self.p_dropout = p_dropout
+
+         self.in_layers = torch.nn.ModuleList()
+         self.res_skip_layers = torch.nn.ModuleList()
+         self.drop = nn.Dropout(p_dropout)
+
+         if gin_channels != 0:
+             cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
+             self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
+
+         for i in range(n_layers):
+             dilation = dilation_rate ** i
+             padding = int((kernel_size * dilation - dilation) / 2)
+             in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
+                                        dilation=dilation, padding=padding)
+             in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
+             self.in_layers.append(in_layer)
+
+             # the last layer's residual branch is unused, so it only needs skip channels
+             if i < n_layers - 1:
+                 res_skip_channels = 2 * hidden_channels
+             else:
+                 res_skip_channels = hidden_channels
+
+             res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+             res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
+             self.res_skip_layers.append(res_skip_layer)
+
+     def forward(self, x, x_mask, g=None, **kwargs):
+         output = torch.zeros_like(x)
+         n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+         if g is not None:
+             g = self.cond_layer(g)
+
+         for i in range(self.n_layers):
+             x_in = self.in_layers[i](x)
+             if g is not None:
+                 cond_offset = i * 2 * self.hidden_channels
+                 g_l = g[:, cond_offset:cond_offset + 2*self.hidden_channels, :]
+             else:
+                 g_l = torch.zeros_like(x_in)
+
+             acts = commons.fused_add_tanh_sigmoid_multiply(
+                 x_in,
+                 g_l,
+                 n_channels_tensor)
+             acts = self.drop(acts)
+
+             res_skip_acts = self.res_skip_layers[i](acts)
+             if i < self.n_layers - 1:
+                 res_acts = res_skip_acts[:, :self.hidden_channels, :]
+                 x = (x + res_acts) * x_mask
+                 output = output + res_skip_acts[:, self.hidden_channels:, :]
+             else:
+                 output = output + res_skip_acts
+         return output * x_mask
+
+     def remove_weight_norm(self):
+         if self.gin_channels != 0:
+             torch.nn.utils.remove_weight_norm(self.cond_layer)
+         for l in self.in_layers:
+             torch.nn.utils.remove_weight_norm(l)
+         for l in self.res_skip_layers:
+             torch.nn.utils.remove_weight_norm(l)
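+
+ # WN usage, a minimal sketch with assumed sizes: this gated WaveNet stack is
+ # the core of the posterior encoder and coupling layers; `g` is an optional
+ # per-utterance conditioning vector broadcast over time.
+ #     wn = WN(hidden_channels=192, kernel_size=5, dilation_rate=1,
+ #             n_layers=16, gin_channels=256)
+ #     x = torch.randn(2, 192, 100)
+ #     x_mask = torch.ones(2, 1, 100)
+ #     g = torch.randn(2, 256, 1)             # e.g. a speaker embedding
+ #     y = wn(x, x_mask, g=g)                 # -> [2, 192, 100]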
+
+
+ class ResBlock1(torch.nn.Module):
+     def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+         super(ResBlock1, self).__init__()
+         self.convs1 = nn.ModuleList([
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+                                padding=get_padding(kernel_size, dilation[0]))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+                                padding=get_padding(kernel_size, dilation[1]))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
+                                padding=get_padding(kernel_size, dilation[2])))
+         ])
+         self.convs1.apply(init_weights)
+
+         self.convs2 = nn.ModuleList([
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                                padding=get_padding(kernel_size, 1))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                                padding=get_padding(kernel_size, 1))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                                padding=get_padding(kernel_size, 1)))
+         ])
+         self.convs2.apply(init_weights)
+
+     def forward(self, x, x_mask=None):
+         for c1, c2 in zip(self.convs1, self.convs2):
+             xt = F.leaky_relu(x, LRELU_SLOPE)
+             if x_mask is not None:
+                 xt = xt * x_mask
+             xt = c1(xt)
+             xt = F.leaky_relu(xt, LRELU_SLOPE)
+             if x_mask is not None:
+                 xt = xt * x_mask
+             xt = c2(xt)
+             x = xt + x
+         if x_mask is not None:
+             x = x * x_mask
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.convs1:
+             remove_weight_norm(l)
+         for l in self.convs2:
+             remove_weight_norm(l)
+
+
+ class ResBlock2(torch.nn.Module):
+     def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
+         super(ResBlock2, self).__init__()
+         self.convs = nn.ModuleList([
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+                                padding=get_padding(kernel_size, dilation[0]))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+                                padding=get_padding(kernel_size, dilation[1])))
+         ])
+         self.convs.apply(init_weights)
+
+     def forward(self, x, x_mask=None):
+         for c in self.convs:
+             xt = F.leaky_relu(x, LRELU_SLOPE)
+             if x_mask is not None:
+                 xt = xt * x_mask
+             xt = c(xt)
+             x = xt + x
+         if x_mask is not None:
+             x = x * x_mask
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.convs:
+             remove_weight_norm(l)
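+
+ # ResBlock1/ResBlock2 are the HiFi-GAN-style residual blocks used by the
+ # decoder; ResBlock2 is the lighter two-convolution variant. A minimal
+ # sketch with assumed sizes:
+ #     block = ResBlock1(channels=512, kernel_size=3, dilation=(1, 3, 5))
+ #     x = torch.randn(2, 512, 100)
+ #     y = block(x)                           # -> [2, 512, 100]
+ #     block.remove_weight_norm()             # call once before inference/export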
+
+
+ class Log(nn.Module):
+     def forward(self, x, x_mask, reverse=False, **kwargs):
+         if not reverse:
+             y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
+             logdet = torch.sum(-y, [1, 2])
+             return y, logdet
+         else:
+             x = torch.exp(x) * x_mask
+             return x
+
+
+ class Flip(nn.Module):
+     def forward(self, x, *args, reverse=False, **kwargs):
+         x = torch.flip(x, [1])
+         if not reverse:
+             logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+             return x, logdet
+         else:
+             return x
+
+
+ class ElementwiseAffine(nn.Module):
+     def __init__(self, channels):
+         super().__init__()
+         self.channels = channels
+         self.m = nn.Parameter(torch.zeros(channels, 1))
+         self.logs = nn.Parameter(torch.zeros(channels, 1))
+
+     def forward(self, x, x_mask, reverse=False, **kwargs):
+         if not reverse:
+             y = self.m + torch.exp(self.logs) * x
+             y = y * x_mask
+             logdet = torch.sum(self.logs * x_mask, [1, 2])
+             return y, logdet
+         else:
+             x = (x - self.m) * torch.exp(-self.logs) * x_mask
+             return x
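+
+ # All flow modules above share one calling convention: the forward pass
+ # returns (y, logdet) for training, while reverse=True returns only the
+ # inverted sample. A minimal sketch with assumed shapes:
+ #     flow = ElementwiseAffine(channels=2)
+ #     x = torch.randn(4, 2, 50)
+ #     x_mask = torch.ones(4, 1, 50)
+ #     y, logdet = flow(x, x_mask)            # training direction
+ #     x_rec = flow(y, x_mask, reverse=True)  # inversion; recovers x * x_mask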
+
+
+ class ResidualCouplingLayer(nn.Module):
+     def __init__(self,
+                  channels,
+                  hidden_channels,
+                  kernel_size,
+                  dilation_rate,
+                  n_layers,
+                  p_dropout=0,
+                  gin_channels=0,
+                  mean_only=False):
+         assert channels % 2 == 0, "channels should be divisible by 2"
+         super().__init__()
+         self.channels = channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.half_channels = channels // 2
+         self.mean_only = mean_only
+
+         self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+         self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
+         self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+         # zero-init the last projection so the flow starts as an identity map
+         self.post.weight.data.zero_()
+         self.post.bias.data.zero_()
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+         h = self.pre(x0) * x_mask
+         h = self.enc(h, x_mask, g=g)
+         stats = self.post(h) * x_mask
+         if not self.mean_only:
+             m, logs = torch.split(stats, [self.half_channels] * 2, 1)
+         else:
+             m = stats
+             logs = torch.zeros_like(m)
+
+         if not reverse:
+             x1 = m + x1 * torch.exp(logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             logdet = torch.sum(logs, [1, 2])
+             return x, logdet
+         else:
+             x1 = (x1 - m) * torch.exp(-logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             return x
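+
+ # A coupling layer only transforms half of its channels, so full flows
+ # interleave it with Flip. A minimal sketch with assumed sizes:
+ #     layer = ResidualCouplingLayer(channels=192, hidden_channels=192,
+ #                                   kernel_size=5, dilation_rate=1,
+ #                                   n_layers=4, mean_only=True)
+ #     x = torch.randn(2, 192, 100)
+ #     x_mask = torch.ones(2, 1, 100)
+ #     z, logdet = layer(x, x_mask)
+ #     x_rec = layer(z, x_mask, reverse=True)  # recovers x * x_mask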
+
+
+ class ConvFlow(nn.Module):
+     def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
+         super().__init__()
+         self.in_channels = in_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.num_bins = num_bins
+         self.tail_bound = tail_bound
+         self.half_channels = in_channels // 2
+
+         self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
+         self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
+         self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
+         self.proj.weight.data.zero_()
+         self.proj.bias.data.zero_()
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+         h = self.pre(x0)
+         h = self.convs(h, x_mask, g=g)
+         h = self.proj(h) * x_mask
+
+         b, c, t = x0.shape
+         h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*?, t] -> [b, c, t, ?]
+
+         unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
+         unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
+         unnormalized_derivatives = h[..., 2*self.num_bins:]
+
+         x1, logabsdet = piecewise_rational_quadratic_transform(x1,
+             unnormalized_widths,
+             unnormalized_heights,
+             unnormalized_derivatives,
+             inverse=reverse,
+             tails='linear',
+             tail_bound=self.tail_bound
+         )
+
+         x = torch.cat([x0, x1], 1) * x_mask
+         logdet = torch.sum(logabsdet * x_mask, [1, 2])
+         if not reverse:
+             return x, logdet
+         else:
+             return x
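+
+ # The projection emits num_bins * 3 - 1 values per transformed channel:
+ # num_bins unnormalized widths, num_bins unnormalized heights, and
+ # num_bins - 1 interior knot derivatives for the rational-quadratic spline.
+ # With the default num_bins=10 that is 29 values per channel per frame.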
+
+
+ class LinearNorm(nn.Module):
+     def __init__(self,
+                  in_channels,
+                  out_channels,
+                  bias=True,
+                  spectral_norm=False,
+                  ):
+         super(LinearNorm, self).__init__()
+         self.fc = nn.Linear(in_channels, out_channels, bias)
+
+         if spectral_norm:
+             self.fc = nn.utils.spectral_norm(self.fc)
+
+     def forward(self, input):
+         out = self.fc(input)
+         return out
+
+
+ class Mish(nn.Module):
+     """Mish activation: x * tanh(softplus(x)) (arXiv:1908.08681)."""
+     def __init__(self):
+         super(Mish, self).__init__()
+
+     def forward(self, x):
+         return x * torch.tanh(F.softplus(x))
+
+
+ class Conv1dGLU(nn.Module):
+     '''
+     Conv1d + GLU (Gated Linear Unit) with residual connection.
+     For GLU see https://arxiv.org/abs/1612.08083.
+     '''
+     def __init__(self, in_channels, out_channels, kernel_size, dropout):
+         super(Conv1dGLU, self).__init__()
+         self.out_channels = out_channels
+         self.conv1 = ConvNorm(in_channels, 2 * out_channels, kernel_size=kernel_size)
+         self.dropout = nn.Dropout(dropout)
+
+     def forward(self, x):
+         residual = x
+         x = self.conv1(x)
+         # split the doubled channels into a content half and a gate half
+         x1, x2 = torch.split(x, split_size_or_sections=self.out_channels, dim=1)
+         x = x1 * torch.sigmoid(x2)
+         x = residual + self.dropout(x)
+         return x
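+
+ # A minimal sketch (assumed sizes): the residual add requires
+ # in_channels == out_channels.
+ #     glu = Conv1dGLU(in_channels=256, out_channels=256, kernel_size=3, dropout=0.1)
+ #     x = torch.randn(2, 256, 80)
+ #     y = glu(x)                             # -> [2, 256, 80]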
+
+
+ class ConvNorm(nn.Module):
+     def __init__(self,
+                  in_channels,
+                  out_channels,
+                  kernel_size=1,
+                  stride=1,
+                  padding=None,
+                  dilation=1,
+                  bias=True,
+                  spectral_norm=False,
+                  ):
+         super(ConvNorm, self).__init__()
+
+         if padding is None:
+             assert kernel_size % 2 == 1
+             padding = int(dilation * (kernel_size - 1) / 2)
+
+         self.conv = torch.nn.Conv1d(in_channels,
+                                     out_channels,
+                                     kernel_size=kernel_size,
+                                     stride=stride,
+                                     padding=padding,
+                                     dilation=dilation,
+                                     bias=bias)
+
+         if spectral_norm:
+             self.conv = nn.utils.spectral_norm(self.conv)
+
+     def forward(self, input):
+         out = self.conv(input)
+         return out
+
+
+ class MultiHeadAttention(nn.Module):
+     ''' Multi-Head Attention module '''
+     def __init__(self, n_head, d_model, d_k, d_v, dropout=0., spectral_norm=False):
+         super().__init__()
+
+         self.n_head = n_head
+         self.d_k = d_k
+         self.d_v = d_v
+
+         self.w_qs = nn.Linear(d_model, n_head * d_k)
+         self.w_ks = nn.Linear(d_model, n_head * d_k)
+         self.w_vs = nn.Linear(d_model, n_head * d_v)
+
+         self.attention = ScaledDotProductAttention(temperature=np.power(d_model, 0.5), dropout=dropout)
+
+         self.fc = nn.Linear(n_head * d_v, d_model)
+         self.dropout = nn.Dropout(dropout)
+
+         if spectral_norm:
+             self.w_qs = nn.utils.spectral_norm(self.w_qs)
+             self.w_ks = nn.utils.spectral_norm(self.w_ks)
+             self.w_vs = nn.utils.spectral_norm(self.w_vs)
+             self.fc = nn.utils.spectral_norm(self.fc)
+
+     def forward(self, x, mask=None):
+         d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
+         sz_b, len_x, _ = x.size()
+
+         residual = x
+
+         q = self.w_qs(x).view(sz_b, len_x, n_head, d_k)
+         k = self.w_ks(x).view(sz_b, len_x, n_head, d_k)
+         v = self.w_vs(x).view(sz_b, len_x, n_head, d_v)
+         q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_x, d_k)  # (n*b) x lq x dk
+         k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_x, d_k)  # (n*b) x lk x dk
+         v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_x, d_v)  # (n*b) x lv x dv
+
+         if mask is not None:
+             slf_mask = mask.repeat(n_head, 1, 1)  # (n*b) x .. x ..
+         else:
+             slf_mask = None
+         output, attn = self.attention(q, k, v, mask=slf_mask)
+
+         output = output.view(n_head, sz_b, len_x, d_v)
+         output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_x, -1)  # b x lq x (n*dv)
+
+         output = self.fc(output)
+
+         output = self.dropout(output) + residual
+         return output, attn
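+
+ # Self-attention over a style/mel sequence, a minimal sketch (sizes assumed):
+ #     attn = MultiHeadAttention(n_head=2, d_model=128, d_k=64, d_v=64, dropout=0.1)
+ #     x = torch.randn(2, 50, 128)            # [batch, time, d_model]
+ #     out, weights = attn(x)                 # out: [2, 50, 128]
+ # A boolean mask of shape [batch, time, time] (True = blocked) masks padding.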
+
+
+ class ScaledDotProductAttention(nn.Module):
+     ''' Scaled Dot-Product Attention '''
+
+     def __init__(self, temperature, dropout):
+         super().__init__()
+         self.temperature = temperature
+         self.softmax = nn.Softmax(dim=2)
+         self.dropout = nn.Dropout(dropout)
+
+     def forward(self, q, k, v, mask=None):
+         attn = torch.bmm(q, k.transpose(1, 2))
+         attn = attn / self.temperature
+
+         if mask is not None:
+             attn = attn.masked_fill(mask, -np.inf)
+
+         attn = self.softmax(attn)
+         p_attn = self.dropout(attn)
+
+         output = torch.bmm(p_attn, v)
+         return output, attn
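+
+ # Note: the returned `attn` holds the softmax scores before dropout, while
+ # the value aggregation uses the dropped-out probabilities `p_attn`. The
+ # temperature passed in by MultiHeadAttention above is sqrt(d_model), a
+ # slightly unusual choice compared with the sqrt(d_k) of Vaswani et al.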