SirAB committed · verified
Commit 8d3e8b4 · 1 Parent(s): bc89775

Upload folder using huggingface_hub
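The commit message indicates the files were pushed with the huggingface_hub Python client. A minimal sketch of how such a commit is typically produced; the repo id and local folder path below are placeholders, not values taken from this page:

from huggingface_hub import upload_folder

# Push a local training folder to the Hub. repo_id and folder_path are
# illustrative placeholders; a write token is taken from the local
# `huggingface-cli login` cache or the HF_TOKEN environment variable.
upload_folder(
    repo_id="SirAB/styletts2-finetune",   # hypothetical repo id
    folder_path="Models/LJSpeech",        # hypothetical local folder
    commit_message="Upload folder using huggingface_hub",
)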

checkpoint/epoch_2nd_00000.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ddefc40cb98399391c907a455c9f7916a43a4dc47a19ce14cde654f915968c85
- size 1926290769
+ oid sha256:cbc343e4a7795475b6346964c28ee22f97cb023554155ea8c1c1f526b871f89f
+ size 1926291032
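The checkpoint is stored through Git LFS, so the diff above only shows the pointer file: the object's sha256 oid and its byte size changed. A minimal sketch, assuming the new checkpoint has already been downloaded to a local path of the same name (the path is illustrative), for verifying it against the pointer values in this commit:

import hashlib
import os

# Values copied from the new LFS pointer in this diff.
EXPECTED_SHA256 = "cbc343e4a7795475b6346964c28ee22f97cb023554155ea8c1c1f526b871f89f"
EXPECTED_SIZE = 1926291032

path = "checkpoint/epoch_2nd_00000.pth"  # placeholder local path

# Compare the on-disk size first, then hash the file in chunks.
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
print("checkpoint matches the LFS pointer")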
config_ft.yml CHANGED
@@ -1,112 +1,21 @@
- log_dir: "Models/LJSpeech"
- save_freq: 1
- log_interval: 10
- device: "cuda"
- epochs: 20 # number of finetuning epoch (1 hour of data)
- batch_size: 2
- max_len: 290 # maximum number of frames
- pretrained_model: "epoch_1st_00000.pth"
- second_stage_load_pretrained: true # set to true if the pre-trained model is for 2nd stage
- load_only_params: false # set to true if do not want to load epoch numbers and optimizer parameters
-
- F0_path: "Utils/JDC/bst.t7"
- ASR_config: "Utils/ASR/config.yml"
- ASR_path: "Utils/ASR/epoch_00080.pth"
- PLBERT_dir: 'Utils/PLBERT/'
-
- data_params:
-   train_data: "Data/train_list.txt"
-   val_data: "Data/val_list.txt"
-   root_path: "/content/StyleTTS2/Dataset"
-   OOD_data: "Data/OOD_texts.txt"
-   min_length: 50 # sample until texts with this size are obtained for OOD texts
-
- preprocess_params:
-   sr: 24000
-   spect_params:
-     n_fft: 2048
-     win_length: 1200
-     hop_length: 300
-
- model_params:
-   multispeaker: true
-
-   dim_in: 64
-   hidden_dim: 512
-   max_conv_dim: 512
-   n_layer: 3
-   n_mels: 80
-
-   n_token: 178 # number of phoneme tokens
-   max_dur: 50 # maximum duration of a single phoneme
-   style_dim: 128 # style vector size
-
-   dropout: 0.2
-
-   # config for decoder
-   decoder:
-     type: 'istftnet' # either hifigan or istftnet
-     resblock_kernel_sizes: [3,7,11]
-     upsample_rates : [10, 6]
-     gen_istft_hop_size: 5
-     gen_istft_n_fft: 20
-     upsample_initial_channel: 512
-     resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]]
-     upsample_kernel_sizes: [20, 12]
-
-   # speech language model config
-   slm:
-     model: 'microsoft/wavlm-base-plus'
-     sr: 16000 # sampling rate of SLM
-     hidden: 768 # hidden size of SLM
-     nlayers: 13 # number of layers of SLM
-     initial_channel: 64 # initial channels of SLM discriminator head
-
-   # style diffusion model config
-   diffusion:
-     embedding_mask_proba: 0.1
-     # transformer config
-     transformer:
-       num_layers: 3
-       num_heads: 8
-       head_features: 64
-       multiplier: 2
-
-     # diffusion distribution config
-     dist:
-       sigma_data: 0.2 # placeholder for estimate_sigma_data set to false
-       estimate_sigma_data: true # estimate sigma_data from the current batch if set to true
-       mean: -3.0
-       std: 1.0
-
- loss_params:
-   lambda_mel: 5. # mel reconstruction loss
-   lambda_gen: 1. # generator loss
-   lambda_slm: 1. # slm feature matching loss
-
-   lambda_mono: 1. # monotonic alignment loss (TMA)
-   lambda_s2s: 1. # sequence-to-sequence loss (TMA)
-
-   lambda_F0: 1. # F0 reconstruction loss
-   lambda_norm: 1. # norm reconstruction loss
-   lambda_dur: 1. # duration loss
-   lambda_ce: 20. # duration predictor probability output CE loss
-   lambda_sty: 1. # style reconstruction loss
-   lambda_diff: 1. # score matching loss
-
-   diff_epoch: 1000000000000000000000000 # style diffusion starting epoch
-   joint_epoch: 3000000000000000000000000 # joint training starting epoch
-
- optimizer_params:
-   lr: 0.0001 # general learning rate
-   bert_lr: 0.00001 # learning rate for PLBERT
-   ft_lr: 0.0001 # learning rate for acoustic modules
-
- slmadv_params:
-   min_len: 400 # minimum length of samples
-   max_len: 500 # maximum length of samples
-   batch_percentage: 0.5 # to prevent out of memory, only use half of the original batch size
-   iter: 10 # update the discriminator every this iterations of generator update
-   thresh: 5 # gradient norm above which the gradient is scaled
-   scale: 0.01 # gradient scaling factor for predictors from SLM discriminators
-   sig: 1.5 # sigma for differentiable duration modeling
+ {ASR_config: Utils/ASR/config.yml, ASR_path: Utils/ASR/epoch_00080.pth, F0_path: Utils/JDC/bst.t7,
+ PLBERT_dir: Utils/PLBERT/, batch_size: 2, data_params: {OOD_data: Data/OOD_texts.txt,
+ min_length: 50, root_path: /content/StyleTTS2/Dataset, train_data: Data/train_list.txt,
+ val_data: Data/val_list.txt}, device: cuda, epochs: 20, load_only_params: false,
+ log_dir: Models/LJSpeech, log_interval: 10, loss_params: {diff_epoch: 1000000000000000000000000,
+ joint_epoch: 3000000000000000000000000, lambda_F0: 1.0, lambda_ce: 20.0, lambda_diff: 1.0,
+ lambda_dur: 1.0, lambda_gen: 1.0, lambda_mel: 5.0, lambda_mono: 1.0, lambda_norm: 1.0,
+ lambda_s2s: 1.0, lambda_slm: 1.0, lambda_sty: 1.0}, max_len: 290, model_params: {
+ decoder: {gen_istft_hop_size: 5, gen_istft_n_fft: 20, resblock_dilation_sizes: [
+ [1, 3, 5], [1, 3, 5], [1, 3, 5]], resblock_kernel_sizes: [3, 7, 11], type: istftnet,
+ upsample_initial_channel: 512, upsample_kernel_sizes: [20, 12], upsample_rates: [
+ 10, 6]}, diffusion: {dist: {estimate_sigma_data: true, mean: -3.0, sigma_data: .nan,
+ std: 1.0}, embedding_mask_proba: 0.1, transformer: {head_features: 64, multiplier: 2,
+ num_heads: 8, num_layers: 3}}, dim_in: 64, dropout: 0.2, hidden_dim: 512,
+ max_conv_dim: 512, max_dur: 50, multispeaker: true, n_layer: 3, n_mels: 80, n_token: 178,
+ slm: {hidden: 768, initial_channel: 64, model: microsoft/wavlm-base-plus, nlayers: 13,
+ sr: 16000}, style_dim: 128}, optimizer_params: {bert_lr: 1.0e-05, ft_lr: 0.0001,
+ lr: 0.0001}, preprocess_params: {spect_params: {hop_length: 300, n_fft: 2048,
+ win_length: 1200}, sr: 24000}, pretrained_model: epoch_1st_00000.pth, save_freq: 1,
+ second_stage_load_pretrained: true, slmadv_params: {batch_percentage: 0.5, iter: 10,
+ max_len: 500, min_len: 400, scale: 0.01, sig: 1.5, thresh: 5}}
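The new config_ft.yml carries the same settings serialized in YAML flow style (a single brace-delimited mapping with sorted keys), which is the kind of output PyYAML produces when a loaded config dict is re-dumped programmatically rather than edited by hand; that account of how the file was regenerated is an assumption, not something stated in the commit. A minimal sketch for loading the flow-style file and re-emitting it in block style:

import yaml

# Load the flow-style config uploaded in this commit; PyYAML parses flow and
# block style identically, so training code reading the file is unaffected.
with open("config_ft.yml") as f:
    config = yaml.safe_load(f)

print(config["model_params"]["decoder"]["type"])  # istftnet
print(config["optimizer_params"]["bert_lr"])      # 1e-05

# Re-dump in block style to restore a layout closer to the original file.
with open("config_ft_block.yml", "w") as f:
    yaml.safe_dump(config, f, default_flow_style=False, sort_keys=False)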
epoch_1st_00000.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ddefc40cb98399391c907a455c9f7916a43a4dc47a19ce14cde654f915968c85
- size 1926290769
+ oid sha256:cbc343e4a7795475b6346964c28ee22f97cb023554155ea8c1c1f526b871f89f
+ size 1926291032
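Both .pth files in this commit now point at the same LFS object (identical oid and size). A minimal sketch for inspecting such a checkpoint once it has been pulled locally; the assumption that it is a torch.save dict of sub-dicts (weights, optimizer state, epoch counters) is typical for training checkpoints like this, not read from the file itself:

import torch

# Load on CPU so inspection works without a GPU.
ckpt = torch.load("epoch_1st_00000.pth", map_location="cpu")

# Print the top-level structure to see which components were saved.
if isinstance(ckpt, dict):
    for key, value in ckpt.items():
        print(key, type(value).__name__)
else:
    print(type(ckpt).__name__)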
train.log CHANGED
The diff for this file is too large to render. See raw diff