DreamSyncCo committed on
Commit
4bb7034
·
verified ·
1 Parent(s): 42e400c

Upload 12 files

Browse files
configs/astral_quantization/default_2048.yml ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ _target_: modules.astral_quantization.default_model.AstralQuantizer
2
+ tokenizer_name: "openai/whisper-small"
3
+ ssl_model_name: "facebook/hubert-large-ll60k"
4
+ ssl_output_layer: 18
5
+ encoder:
6
+ _target_: modules.astral_quantization.convnext.ConvNeXtV2Stage
7
+ dim: 512
8
+ num_blocks: 12
9
+ intermediate_dim: 1536
10
+ dilation: 1
11
+ input_dim: 1024
12
+ quantizer:
13
+ _target_: modules.astral_quantization.bsq.BinarySphericalQuantize
14
+ codebook_size: 2048 # codebook size, must be a power of 2
15
+ dim: 512
16
+ entropy_loss_weight: 0.1
17
+ diversity_gamma: 1.0
18
+ spherical: True
19
+ enable_entropy_loss: True
20
+ soft_entropy_loss: True
21
+ decoder:
22
+ _target_: modules.astral_quantization.convnext.ConvNeXtV2Stage
23
+ dim: 512
24
+ num_blocks: 12
25
+ intermediate_dim: 1536
26
+ dilation: 1
27
+ output_dim: 1024
28
+ gin_channels: 192
29
+ asr_decoder:
30
+ _target_: modules.astral_quantization.asr_decoder.ASRDecoder
31
+ hidden_dim: 768
32
+ num_heads: 12
33
+ depth: 12
34
+ block_size: 4096
35
+ in_channels: 512
36
+ n_vocab: 51866
37
+ bos_id: 50528
38
+ eos_id: 50527
39
+ dropout_rate: 0.0
40
+ attn_dropout_rate: 0.0
configs/astral_quantization/default_32.yml ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ _target_: default_model.AstralQuantizer
2
+ tokenizer_name: "openai/whisper-small"
3
+ ssl_model_name: "facebook/hubert-large-ll60k"
4
+ ssl_output_layer: 18
5
+ encoder:
6
+ _target_: modules.convnext.ConvNeXtV2Stage
7
+ dim: 512
8
+ num_blocks: 12
9
+ intermediate_dim: 1536
10
+ dilation: 1
11
+ input_dim: 1024
12
+ quantizer:
13
+ _target_: modules.bsq.BinarySphericalQuantize
14
+ codebook_size: 32 # codebook size, must be a power of 2
15
+ dim: 512
16
+ entropy_loss_weight: 0.1
17
+ diversity_gamma: 1.0
18
+ spherical: True
19
+ enable_entropy_loss: True
20
+ soft_entropy_loss: True
21
+ decoder:
22
+ _target_: modules.convnext.ConvNeXtV2Stage
23
+ dim: 512
24
+ num_blocks: 12
25
+ intermediate_dim: 1536
26
+ dilation: 1
27
+ output_dim: 1024
28
+ gin_channels: 192
29
+ asr_decoder:
30
+ _target_: modules.asr_decoder.ASRDecoder
31
+ hidden_dim: 768
32
+ num_heads: 12
33
+ depth: 12
34
+ block_size: 4096
35
+ in_channels: 512
36
+ n_vocab: 51866
37
+ bos_id: 50528
38
+ eos_id: 50527
39
+ dropout_rate: 0.0
40
+ attn_dropout_rate: 0.0
configs/config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"reference_audio_path": "D:/FAcodec/test_waves/kobe_0.wav", "sg_hostapi": "MME", "sg_wasapi_exclusive": false, "sg_input_device": "\u9ea6\u514b\u98ce (Razer BlackShark V2 HS 2.4", "sg_output_device": "\u626c\u58f0\u5668 (Razer BlackShark V2 HS 2.4", "sr_type": "sr_model", "diffusion_steps": 10.0, "inference_cfg_rate": 0.0, "max_prompt_length": 3.0, "block_time": 0.7, "crossfade_length": 0.04, "extra_time": 0.5, "extra_time_right": 0.02}
configs/config_dit_mel_seed.yml ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ log_dir: "./runs/run_dit_mel_seed"
2
+ save_freq: 1
3
+ log_interval: 10
4
+ save_interval: 1000
5
+ device: "cuda"
6
+ epochs: 1000 # number of epochs for first stage training (pre-training)
7
+ batch_size: 4
8
+ batch_length: 100 # maximum duration of audio in a batch (in seconds)
9
+ max_len: 80 # maximum number of frames
10
+ pretrained_model: ""
11
+ pretrained_encoder: ""
12
+ load_only_params: False # set to true if do not want to load epoch numbers and optimizer parameters
13
+
14
+ F0_path: "modules/JDC/bst.t7"
15
+
16
+ preprocess_params:
17
+ sr: 22050
18
+ spect_params:
19
+ n_fft: 1024
20
+ win_length: 1024
21
+ hop_length: 256
22
+ n_mels: 80
23
+
24
+ model_params:
25
+ dit_type: "DiT" # uDiT or DiT
26
+ reg_loss_type: "l2" # l1 or l2
27
+
28
+ speech_tokenizer:
29
+ path: "speech_tokenizer_v1.onnx"
30
+
31
+ style_encoder:
32
+ dim: 192
33
+ campplus_path: "campplus_cn_common.bin"
34
+
35
+ DAC:
36
+ encoder_dim: 64
37
+ encoder_rates: [2, 5, 5, 6]
38
+ decoder_dim: 1536
39
+ decoder_rates: [ 6, 5, 5, 2 ]
40
+ sr: 24000
41
+
42
+ length_regulator:
43
+ channels: 768
44
+ is_discrete: true
45
+ content_codebook_size: 4096
46
+ in_frame_rate: 50
47
+ out_frame_rate: 80
48
+ sampling_ratios: [1, 1, 1, 1]
49
+
50
+ DiT:
51
+ hidden_dim: 768
52
+ num_heads: 12
53
+ depth: 12
54
+ class_dropout_prob: 0.1
55
+ block_size: 4096
56
+ in_channels: 80
57
+ style_condition: true
58
+ final_layer_type: 'wavenet'
59
+ target: 'mel' # mel or codec
60
+ content_dim: 768
61
+ content_codebook_size: 1024
62
+ content_type: 'discrete'
63
+ f0_condition: false
64
+ n_f0_bins: 512
65
+ content_codebooks: 1
66
+ is_causal: false
67
+ long_skip_connection: true
68
+ zero_prompt_speech_token: false # for prompt component, do not input corresponding speech token
69
+
70
+ wavenet:
71
+ hidden_dim: 768
72
+ num_layers: 8
73
+ kernel_size: 5
74
+ dilation_rate: 1
75
+ p_dropout: 0.2
76
+ style_condition: true
77
+
78
+ loss_params:
79
+ base_lr: 0.0001
configs/config_dit_mel_seed_facodec_small.yml ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ log_dir: "./runs/run_dit_mel_seed_facodec_small"
2
+ save_freq: 1
3
+ log_interval: 10
4
+ save_interval: 1000
5
+ device: "cuda"
6
+ epochs: 1000 # number of epochs for first stage training (pre-training)
7
+ batch_size: 2
8
+ batch_length: 100 # maximum duration of audio in a batch (in seconds)
9
+ max_len: 80 # maximum number of frames
10
+ pretrained_model: ""
11
+ pretrained_encoder: ""
12
+ load_only_params: False # set to true if do not want to load epoch numbers and optimizer parameters
13
+
14
+ F0_path: "modules/JDC/bst.t7"
15
+
16
+ data_params:
17
+ train_data: "./data/train.txt"
18
+ val_data: "./data/val.txt"
19
+ root_path: "./data/"
20
+
21
+ preprocess_params:
22
+ sr: 22050
23
+ spect_params:
24
+ n_fft: 1024
25
+ win_length: 1024
26
+ hop_length: 256
27
+ n_mels: 80
28
+
29
+ model_params:
30
+ dit_type: "DiT" # uDiT or DiT
31
+ reg_loss_type: "l1" # l1 or l2
32
+
33
+ speech_tokenizer:
34
+ type: 'facodec' # facodec or cosyvoice
35
+ path: "checkpoints/speech_tokenizer_v1.onnx"
36
+
37
+ style_encoder:
38
+ dim: 192
39
+ campplus_path: "checkpoints/campplus_cn_common.bin"
40
+
41
+ DAC:
42
+ encoder_dim: 64
43
+ encoder_rates: [2, 5, 5, 6]
44
+ decoder_dim: 1536
45
+ decoder_rates: [ 6, 5, 5, 2 ]
46
+ sr: 24000
47
+
48
+ length_regulator:
49
+ channels: 512
50
+ is_discrete: true
51
+ content_codebook_size: 1024
52
+ in_frame_rate: 80
53
+ out_frame_rate: 80
54
+ sampling_ratios: [1, 1, 1, 1]
55
+ token_dropout_prob: 0.3 # probability of performing token dropout
56
+ token_dropout_range: 1.0 # maximum percentage of tokens to drop out
57
+ n_codebooks: 3
58
+ quantizer_dropout: 0.5
59
+ f0_condition: false
60
+ n_f0_bins: 512
61
+
62
+ DiT:
63
+ hidden_dim: 512
64
+ num_heads: 8
65
+ depth: 13
66
+ class_dropout_prob: 0.1
67
+ block_size: 8192
68
+ in_channels: 80
69
+ style_condition: true
70
+ final_layer_type: 'wavenet'
71
+ target: 'mel' # mel or codec
72
+ content_dim: 512
73
+ content_codebook_size: 1024
74
+ content_type: 'discrete'
75
+ f0_condition: true
76
+ n_f0_bins: 512
77
+ content_codebooks: 1
78
+ is_causal: false
79
+ long_skip_connection: true
80
+ zero_prompt_speech_token: false # for prompt component, do not input corresponding speech token
81
+ time_as_token: false
82
+ style_as_token: false
83
+ uvit_skip_connection: true
84
+ add_resblock_in_transformer: false
85
+
86
+ wavenet:
87
+ hidden_dim: 512
88
+ num_layers: 8
89
+ kernel_size: 5
90
+ dilation_rate: 1
91
+ p_dropout: 0.2
92
+ style_condition: true
93
+
94
+ loss_params:
95
+ base_lr: 0.0001
96
+ lambda_mel: 45
97
+ lambda_kl: 1.0
configs/config_dit_mel_seed_wavenet.yml ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ log_dir: "./runs/run_dit_mel_seed"
2
+ save_freq: 1
3
+ log_interval: 10
4
+ save_interval: 1000
5
+ device: "cuda"
6
+ epochs: 1000 # number of epochs for first stage training (pre-training)
7
+ batch_size: 4
8
+ batch_length: 100 # maximum duration of audio in a batch (in seconds)
9
+ max_len: 80 # maximum number of frames
10
+ pretrained_model: ""
11
+ pretrained_encoder: ""
12
+ load_only_params: False # set to true if do not want to load epoch numbers and optimizer parameters
13
+
14
+ F0_path: "modules/JDC/bst.t7"
15
+
16
+ preprocess_params:
17
+ sr: 22050
18
+ spect_params:
19
+ n_fft: 1024
20
+ win_length: 1024
21
+ hop_length: 256
22
+ n_mels: 80
23
+
24
+ model_params:
25
+ dit_type: "DiT" # uDiT or DiT
26
+ reg_loss_type: "l2" # l1 or l2
27
+
28
+ speech_tokenizer:
29
+ path: "checkpoints/speech_tokenizer_v1.onnx"
30
+
31
+ style_encoder:
32
+ dim: 192
33
+ campplus_path: "campplus_cn_common.bin"
34
+
35
+ DAC:
36
+ encoder_dim: 64
37
+ encoder_rates: [2, 5, 5, 6]
38
+ decoder_dim: 1536
39
+ decoder_rates: [ 6, 5, 5, 2 ]
40
+ sr: 24000
41
+
42
+ length_regulator:
43
+ channels: 768
44
+ is_discrete: true
45
+ content_codebook_size: 4096
46
+ in_frame_rate: 50
47
+ out_frame_rate: 80
48
+ sampling_ratios: [1, 1, 1, 1]
49
+
50
+ DiT:
51
+ hidden_dim: 768
52
+ num_heads: 12
53
+ depth: 12
54
+ class_dropout_prob: 0.1
55
+ block_size: 8192
56
+ in_channels: 80
57
+ style_condition: true
58
+ final_layer_type: 'wavenet'
59
+ target: 'mel' # mel or codec
60
+ content_dim: 768
61
+ content_codebook_size: 1024
62
+ content_type: 'discrete'
63
+ f0_condition: false
64
+ n_f0_bins: 512
65
+ content_codebooks: 1
66
+ is_causal: false
67
+ long_skip_connection: true
68
+ zero_prompt_speech_token: false # for prompt component, do not input corresponding speech token
69
+
70
+ wavenet:
71
+ hidden_dim: 768
72
+ num_layers: 8
73
+ kernel_size: 5
74
+ dilation_rate: 1
75
+ p_dropout: 0.2
76
+ style_condition: true
77
+
78
+ loss_params:
79
+ base_lr: 0.0001
configs/hifigan.yml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ hift:
2
+ in_channels: 80
3
+ base_channels: 512
4
+ nb_harmonics: 8
5
+ sampling_rate: 22050
6
+ nsf_alpha: 0.1
7
+ nsf_sigma: 0.003
8
+ nsf_voiced_threshold: 10
9
+ upsample_rates: [8, 8]
10
+ upsample_kernel_sizes: [16, 16]
11
+ istft_params:
12
+ n_fft: 16
13
+ hop_len: 4
14
+ resblock_kernel_sizes: [3, 7, 11]
15
+ resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
16
+ source_resblock_kernel_sizes: [7, 11]
17
+ source_resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5]]
18
+ lrelu_slope: 0.1
19
+ audio_limit: 0.99
20
+ f0_predictor:
21
+ num_class: 1
22
+ in_channels: 80
23
+ cond_channels: 512
24
+
25
+ pretrained_model_path: "checkpoints/hift.pt"
configs/inuse/.gitignore ADDED
File without changes
configs/inuse/config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"reference_audio_path": "D:/seed-vc/examples/reference/trump_0.wav", "sg_hostapi": "MME", "sg_wasapi_exclusive": false, "sg_input_device": "\u9ea6\u514b\u98ce (Razer BlackShark V2 HS USB", "sg_output_device": "\u626c\u58f0\u5668 (Razer BlackShark V2 HS USB", "sr_type": "sr_model", "diffusion_steps": 8.0, "inference_cfg_rate": 0.7, "max_prompt_length": 3.0, "block_time": 0.58, "crossfade_length": 0.04, "extra_time_ce": 2.5, "extra_time": 0.5, "extra_time_right": 0.02}
configs/presets/config_dit_mel_seed_uvit_whisper_base_f0_44k.yml ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ log_dir: "./runs"
2
+ save_freq: 1
3
+ log_interval: 10
4
+ save_interval: 1000
5
+ device: "cuda"
6
+ epochs: 1000 # number of epochs for first stage training (pre-training)
7
+ batch_size: 1
8
+ batch_length: 100 # maximum duration of audio in a batch (in seconds)
9
+ max_len: 80 # maximum number of frames
10
+ pretrained_model: "DiT_seed_v2_uvit_whisper_base_f0_44k_bigvgan_pruned_ft_ema.pth"
11
+ pretrained_encoder: ""
12
+ load_only_params: False # set to true if do not want to load epoch numbers and optimizer parameters
13
+
14
+ preprocess_params:
15
+ sr: 44100
16
+ spect_params:
17
+ n_fft: 2048
18
+ win_length: 2048
19
+ hop_length: 512
20
+ n_mels: 128
21
+ fmin: 0
22
+ fmax: "None"
23
+
24
+ model_params:
25
+ dit_type: "DiT" # uDiT or DiT
26
+ reg_loss_type: "l1" # l1 or l2
27
+
28
+ timbre_shifter:
29
+ se_db_path: "./modules/openvoice/checkpoints_v2/converter/se_db.pt"
30
+ ckpt_path: './modules/openvoice/checkpoints_v2/converter'
31
+
32
+ vocoder:
33
+ type: "bigvgan"
34
+ name: "nvidia/bigvgan_v2_44khz_128band_512x"
35
+
36
+ speech_tokenizer:
37
+ type: 'whisper'
38
+ name: "openai/whisper-small"
39
+
40
+ style_encoder:
41
+ dim: 192
42
+ campplus_path: "campplus_cn_common.bin"
43
+
44
+ DAC:
45
+ encoder_dim: 64
46
+ encoder_rates: [2, 5, 5, 6]
47
+ decoder_dim: 1536
48
+ decoder_rates: [ 6, 5, 5, 2 ]
49
+ sr: 24000
50
+
51
+ length_regulator:
52
+ channels: 768
53
+ is_discrete: false
54
+ in_channels: 768
55
+ content_codebook_size: 2048
56
+ sampling_ratios: [1, 1, 1, 1]
57
+ vector_quantize: false
58
+ n_codebooks: 1
59
+ quantizer_dropout: 0.0
60
+ f0_condition: true
61
+ n_f0_bins: 256
62
+
63
+ DiT:
64
+ hidden_dim: 768
65
+ num_heads: 12
66
+ depth: 17
67
+ class_dropout_prob: 0.1
68
+ block_size: 8192
69
+ in_channels: 128
70
+ style_condition: true
71
+ final_layer_type: 'mlp'
72
+ target: 'mel' # mel or codec
73
+ content_dim: 768
74
+ content_codebook_size: 1024
75
+ content_type: 'discrete'
76
+ f0_condition: true
77
+ n_f0_bins: 256
78
+ content_codebooks: 1
79
+ is_causal: false
80
+ long_skip_connection: false
81
+ zero_prompt_speech_token: false # for prompt component, do not input corresponding speech token
82
+ time_as_token: false
83
+ style_as_token: false
84
+ uvit_skip_connection: true
85
+ add_resblock_in_transformer: false
86
+
87
+ wavenet:
88
+ hidden_dim: 768
89
+ num_layers: 8
90
+ kernel_size: 5
91
+ dilation_rate: 1
92
+ p_dropout: 0.2
93
+ style_condition: true
94
+
95
+ loss_params:
96
+ base_lr: 0.0001
97
+ lambda_mel: 45
98
+ lambda_kl: 1.0
configs/presets/config_dit_mel_seed_uvit_whisper_small_wavenet.yml ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ log_dir: "./runs"
2
+ save_freq: 1
3
+ log_interval: 10
4
+ save_interval: 1000
5
+ device: "cuda"
6
+ epochs: 1000 # number of epochs for first stage training (pre-training)
7
+ batch_size: 2
8
+ batch_length: 100 # maximum duration of audio in a batch (in seconds)
9
+ max_len: 80 # maximum number of frames
10
+ pretrained_model: "DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth"
11
+ pretrained_encoder: ""
12
+ load_only_params: False # set to true if do not want to load epoch numbers and optimizer parameters
13
+
14
+ preprocess_params:
15
+ sr: 22050
16
+ spect_params:
17
+ n_fft: 1024
18
+ win_length: 1024
19
+ hop_length: 256
20
+ n_mels: 80
21
+ fmin: 0
22
+ fmax: "None"
23
+
24
+ model_params:
25
+ dit_type: "DiT" # uDiT or DiT
26
+ reg_loss_type: "l1" # l1 or l2
27
+
28
+ timbre_shifter:
29
+ se_db_path: "./modules/openvoice/checkpoints_v2/converter/se_db.pt"
30
+ ckpt_path: './modules/openvoice/checkpoints_v2/converter'
31
+
32
+ speech_tokenizer:
33
+ type: 'whisper'
34
+ name: "openai/whisper-small"
35
+
36
+ style_encoder:
37
+ dim: 192
38
+ campplus_path: "campplus_cn_common.bin"
39
+
40
+ vocoder:
41
+ type: "bigvgan"
42
+ name: "nvidia/bigvgan_v2_22khz_80band_256x"
43
+
44
+ length_regulator:
45
+ channels: 512
46
+ is_discrete: false
47
+ in_channels: 768
48
+ content_codebook_size: 2048
49
+ sampling_ratios: [1, 1, 1, 1]
50
+ vector_quantize: false
51
+ n_codebooks: 1
52
+ quantizer_dropout: 0.0
53
+ f0_condition: false
54
+ n_f0_bins: 512
55
+
56
+ DiT:
57
+ hidden_dim: 512
58
+ num_heads: 8
59
+ depth: 13
60
+ class_dropout_prob: 0.1
61
+ block_size: 8192
62
+ in_channels: 80
63
+ style_condition: true
64
+ final_layer_type: 'wavenet'
65
+ target: 'mel' # mel or codec
66
+ content_dim: 512
67
+ content_codebook_size: 1024
68
+ content_type: 'discrete'
69
+ f0_condition: false
70
+ n_f0_bins: 512
71
+ content_codebooks: 1
72
+ is_causal: false
73
+ long_skip_connection: true
74
+ zero_prompt_speech_token: false # for prompt component, do not input corresponding speech token
75
+ time_as_token: false
76
+ style_as_token: false
77
+ uvit_skip_connection: true
78
+ add_resblock_in_transformer: false
79
+
80
+ wavenet:
81
+ hidden_dim: 512
82
+ num_layers: 8
83
+ kernel_size: 5
84
+ dilation_rate: 1
85
+ p_dropout: 0.2
86
+ style_condition: true
87
+
88
+ loss_params:
89
+ base_lr: 0.0001
90
+ lambda_mel: 45
91
+ lambda_kl: 1.0
configs/presets/config_dit_mel_seed_uvit_xlsr_tiny.yml ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ log_dir: "./runs/"
2
+ save_freq: 1
3
+ log_interval: 10
4
+ save_interval: 500
5
+ device: "cuda"
6
+ epochs: 1000 # number of epochs for first stage training (pre-training)
7
+ batch_size: 2
8
+ batch_length: 100 # maximum duration of audio in a batch (in seconds)
9
+ max_len: 80 # maximum number of frames
10
+ pretrained_model: "DiT_uvit_tat_xlsr_ema.pth"
11
+ pretrained_encoder: ""
12
+ load_only_params: False # set to true if do not want to load epoch numbers and optimizer parameters
13
+
14
+ preprocess_params:
15
+ sr: 22050
16
+ spect_params:
17
+ n_fft: 1024
18
+ win_length: 1024
19
+ hop_length: 256
20
+ n_mels: 80
21
+ fmin: 0
22
+ fmax: 8000
23
+
24
+ model_params:
25
+ dit_type: "DiT" # uDiT or DiT
26
+ reg_loss_type: "l1" # l1 or l2
27
+ diffusion_type: "flow"
28
+
29
+ timbre_shifter:
30
+ se_db_path: "./modules/openvoice/checkpoints_v2/converter/se_db.pt"
31
+ ckpt_path: './modules/openvoice/checkpoints_v2/converter'
32
+
33
+ vocoder:
34
+ type: "hifigan"
35
+
36
+ speech_tokenizer:
37
+ type: 'xlsr'
38
+ output_layer: 12
39
+ name: 'facebook/wav2vec2-xls-r-300m'
40
+
41
+ style_encoder:
42
+ dim: 192
43
+ campplus_path: "campplus_cn_common.bin"
44
+
45
+ length_regulator:
46
+ channels: 384
47
+ is_discrete: false
48
+ in_channels: 1024
49
+ content_codebook_size: 1024
50
+ sampling_ratios: [1, 1, 1, 1]
51
+ vector_quantize: false
52
+ n_codebooks: 2
53
+ quantizer_dropout: 0.0
54
+ f0_condition: false
55
+ n_f0_bins: 512
56
+
57
+ DiT:
58
+ hidden_dim: 384
59
+ num_heads: 6
60
+ depth: 9
61
+ class_dropout_prob: 0.1
62
+ block_size: 8192
63
+ in_channels: 80
64
+ style_condition: true
65
+ final_layer_type: 'mlp'
66
+ target: 'mel' # mel or betavae
67
+ content_dim: 384
68
+ content_codebook_size: 1024
69
+ content_type: 'discrete'
70
+ f0_condition: false
71
+ n_f0_bins: 512
72
+ content_codebooks: 1
73
+ is_causal: false
74
+ long_skip_connection: false
75
+ zero_prompt_speech_token: false # for prompt component, do not input corresponding speech token
76
+ time_as_token: true
77
+ style_as_token: true
78
+ uvit_skip_connection: true
79
+ add_resblock_in_transformer: false
80
+
81
+ loss_params:
82
+ base_lr: 0.0001