Akjava committed
Commit 37d83c7 · 1 Parent(s): 4c7b8d1
runs/2024-09-09_09-40-19/.hydra/config.yaml ADDED
@@ -0,0 +1,127 @@
+ task_name: train
+ run_name: ja004
+ tags:
+ - ja004
+ train: true
+ test: true
+ ckpt_path: datas/ja004/checkpoint_epoch=4999.ckpt
+ seed: 1234
+ data:
+   _target_: matcha.data.text_mel_datamodule.TextMelDataModule
+   name: cvm001
+   train_filelist_path: datas/ja004/stage5_train.txt
+   valid_filelist_path: datas/ja004/stage5_valid.txt
+   batch_size: 90
+   num_workers: 2
+   pin_memory: true
+   cleaners:
+   - basic_cleaners2
+   add_blank: true
+   n_spks: 100
+   n_fft: 1024
+   n_feats: 80
+   sample_rate: 22050
+   hop_length: 256
+   win_length: 1024
+   f_min: 0
+   f_max: 8000
+   data_statistics:
+     mel_mean: -5.714534282684326
+     mel_std: 2.280709743499756
+   seed: ${seed}
+ model:
+   _target_: matcha.models.matcha_tts.MatchaTTS
+   n_vocab: 178
+   n_spks: ${data.n_spks}
+   spk_emb_dim: 64
+   n_feats: 80
+   data_statistics: ${data.data_statistics}
+   out_size: null
+   prior_loss: true
+   encoder:
+     encoder_type: RoPE Encoder
+     encoder_params:
+       n_feats: ${model.n_feats}
+       n_channels: 192
+       filter_channels: 768
+       filter_channels_dp: 256
+       n_heads: 2
+       n_layers: 6
+       kernel_size: 3
+       p_dropout: 0.1
+       spk_emb_dim: 64
+       n_spks: 1
+       prenet: true
+     duration_predictor_params:
+       filter_channels_dp: ${model.encoder.encoder_params.filter_channels_dp}
+       kernel_size: 3
+       p_dropout: ${model.encoder.encoder_params.p_dropout}
+   decoder:
+     channels:
+     - 256
+     - 256
+     dropout: 0.05
+     attention_head_dim: 64
+     n_blocks: 1
+     num_mid_blocks: 2
+     num_heads: 2
+     act_fn: snakebeta
+   cfm:
+     name: CFM
+     solver: euler
+     sigma_min: 0.0001
+   optimizer:
+     _target_: torch.optim.Adam
+     _partial_: true
+     lr: 0.0001
+     weight_decay: 0.0
+ callbacks:
+   model_checkpoint:
+     _target_: lightning.pytorch.callbacks.ModelCheckpoint
+     dirpath: ${paths.output_dir}/checkpoints
+     filename: checkpoint_{epoch:03d}
+     monitor: epoch
+     verbose: false
+     save_last: true
+     save_top_k: 25
+     mode: max
+     auto_insert_metric_name: true
+     save_weights_only: false
+     every_n_train_steps: null
+     train_time_interval: null
+     every_n_epochs: 50
+     save_on_train_epoch_end: null
+   model_summary:
+     _target_: lightning.pytorch.callbacks.RichModelSummary
+     max_depth: 3
+   rich_progress_bar:
+     _target_: lightning.pytorch.callbacks.RichProgressBar
+ logger:
+   tensorboard:
+     _target_: lightning.pytorch.loggers.tensorboard.TensorBoardLogger
+     save_dir: ${paths.output_dir}/tensorboard/
+     name: null
+     log_graph: false
+     default_hp_metric: true
+     prefix: ''
+ trainer:
+   _target_: lightning.pytorch.trainer.Trainer
+   default_root_dir: ${paths.output_dir}
+   max_epochs: -1
+   accelerator: gpu
+   devices:
+   - 0
+   precision: 16-mixed
+   check_val_every_n_epoch: 1
+   deterministic: false
+   gradient_clip_val: 5.0
+ paths:
+   root_dir: ${oc.env:PROJECT_ROOT}
+   data_dir: ${paths.root_dir}/data/
+   log_dir: ${paths.root_dir}/logs/
+   output_dir: ${hydra:runtime.output_dir}
+   work_dir: ${hydra:runtime.cwd}
+ extras:
+   ignore_warnings: false
+   enforce_tags: true
+   print_config: true
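Note: the `_target_` entries in this config are standard Hydra instantiation targets, and the train.log added later in this commit shows the training script building the datamodule, model, callbacks and logger from exactly this tree. Below is a minimal sketch (not the repository's train.py) of how such a saved config can be consumed, assuming hydra-core/omegaconf are installed and the matcha package is importable:

```python
# Minimal sketch: load the saved .hydra/config.yaml and instantiate the data
# and model subtrees via their _target_ keys, mirroring what train.log reports.
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.load("runs/2024-09-09_09-40-19/.hydra/config.yaml")

# ${seed}, ${data.*} and ${model.*} interpolations resolve within this file;
# ${paths.*} / ${hydra:*} entries would need the original runtime context,
# so only the data and model subtrees are instantiated here.
datamodule = instantiate(cfg.data)   # -> TextMelDataModule
model = instantiate(cfg.model)       # -> MatchaTTS (~20.86M params per hparams.yaml)
print(type(datamodule).__name__, type(model).__name__)
```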
runs/2024-09-09_09-40-19/.hydra/hydra.yaml ADDED
@@ -0,0 +1,182 @@
+ hydra:
+   run:
+     dir: ${paths.log_dir}/${task_name}/${run_name}/runs/${now:%Y-%m-%d}_${now:%H-%M-%S}
+   sweep:
+     dir: ${paths.log_dir}/${task_name}/${run_name}/multiruns/${now:%Y-%m-%d}_${now:%H-%M-%S}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params: null
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: RUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=RUN
+     task:
+     - experiment=ja004
+   job:
+     name: train
+     chdir: null
+     override_dirname: experiment=ja004
+     id: ???
+     num: ???
+     config_name: train.yaml
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /notebooks/Matcha-TTS
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: /notebooks/Matcha-TTS/configs
+       schema: file
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /notebooks/Matcha-TTS/logs/train/ja004/runs/2024-09-09_09-40-19
+     choices:
+       debug: null
+       local: default
+       hparams_search: null
+       experiment: ja004
+       hydra: default
+       extras: default
+       paths: default
+       trainer: default
+       logger: tensorboard
+       callbacks: default
+       model: matcha
+       model/optimizer: adam.yaml
+       model/cfm: default.yaml
+       model/decoder: default.yaml
+       model/encoder: default.yaml
+       data: ja004.yaml
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: colorlog
+       hydra/hydra_logging: colorlog
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
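Note: the hydra.run.dir template at the top of this file is what produced the concrete runtime.output_dir recorded near the bottom. A purely illustrative expansion, with plain Python string formatting standing in for Hydra's ${...} resolver:

```python
# Illustrative only: emulates how hydra.run.dir expands for this run, using
# paths.log_dir = /notebooks/Matcha-TTS/logs, task_name = train,
# run_name = ja004 and the job start time 2024-09-09 09:40:19.
from datetime import datetime

now = datetime(2024, 9, 9, 9, 40, 19)
run_dir = "{log_dir}/{task_name}/{run_name}/runs/{date}_{time}".format(
    log_dir="/notebooks/Matcha-TTS/logs",
    task_name="train",
    run_name="ja004",
    date=now.strftime("%Y-%m-%d"),
    time=now.strftime("%H-%M-%S"),
)
print(run_dir)  # /notebooks/Matcha-TTS/logs/train/ja004/runs/2024-09-09_09-40-19
```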
runs/2024-09-09_09-40-19/.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
+ - experiment=ja004
runs/2024-09-09_09-40-19/checkpoints/checkpoint_epoch=5049.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75ec591b861ca5707e163799f5c2b6c100202b71c4390b768d2c93bc99268582
+ size 250674630
runs/2024-09-09_09-40-19/checkpoints/last.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef1177e07319397fd7096d8990ef3d00d94ee4b232a7498a4a0df7d481b643a7
+ size 250674630
runs/2024-09-09_09-40-19/config_tree.log ADDED
@@ -0,0 +1,143 @@
+ CONFIG
+ ├── data
+ │ └── _target_: matcha.data.text_mel_datamodule.TextMelDataModule
+ │ name: cvm001
+ │ train_filelist_path: datas/ja004/stage5_train.txt
+ │ valid_filelist_path: datas/ja004/stage5_valid.txt
+ │ batch_size: 90
+ │ num_workers: 2
+ │ pin_memory: true
+ │ cleaners:
+ │ - basic_cleaners2
+ │ add_blank: true
+ │ n_spks: 100
+ │ n_fft: 1024
+ │ n_feats: 80
+ │ sample_rate: 22050
+ │ hop_length: 256
+ │ win_length: 1024
+ │ f_min: 0
+ │ f_max: 8000
+ │ data_statistics:
+ │ mel_mean: -5.714534282684326
+ │ mel_std: 2.280709743499756
+ │ seed: 1234
+
+ ├── model
+ │ └── _target_: matcha.models.matcha_tts.MatchaTTS
+ │ n_vocab: 178
+ │ n_spks: 100
+ │ spk_emb_dim: 64
+ │ n_feats: 80
+ │ data_statistics:
+ │ mel_mean: -5.714534282684326
+ │ mel_std: 2.280709743499756
+ │ out_size: null
+ │ prior_loss: true
+ │ encoder:
+ │ encoder_type: RoPE Encoder
+ │ encoder_params:
+ │ n_feats: 80
+ │ n_channels: 192
+ │ filter_channels: 768
+ │ filter_channels_dp: 256
+ │ n_heads: 2
+ │ n_layers: 6
+ │ kernel_size: 3
+ │ p_dropout: 0.1
+ │ spk_emb_dim: 64
+ │ n_spks: 1
+ │ prenet: true
+ │ duration_predictor_params:
+ │ filter_channels_dp: 256
+ │ kernel_size: 3
+ │ p_dropout: 0.1
+ │ decoder:
+ │ channels:
+ │ - 256
+ │ - 256
+ │ dropout: 0.05
+ │ attention_head_dim: 64
+ │ n_blocks: 1
+ │ num_mid_blocks: 2
+ │ num_heads: 2
+ │ act_fn: snakebeta
+ │ cfm:
+ │ name: CFM
+ │ solver: euler
+ │ sigma_min: 0.0001
+ │ optimizer:
+ │ _target_: torch.optim.Adam
+ │ _partial_: true
+ │ lr: 0.0001
+ │ weight_decay: 0.0
+
+ ├── callbacks
+ │ └── model_checkpoint:
+ │ _target_: lightning.pytorch.callbacks.ModelCheckpoint
+ │ dirpath: /notebooks/Matcha-TTS/logs/train/ja004/runs/2024-09-09_09-40-19/checkpoints
+ │ filename: checkpoint_{epoch:03d}
+ │ monitor: epoch
+ │ verbose: false
+ │ save_last: true
+ │ save_top_k: 25
+ │ mode: max
+ │ auto_insert_metric_name: true
+ │ save_weights_only: false
+ │ every_n_train_steps: null
+ │ train_time_interval: null
+ │ every_n_epochs: 50
+ │ save_on_train_epoch_end: null
+ │ model_summary:
+ │ _target_: lightning.pytorch.callbacks.RichModelSummary
+ │ max_depth: 3
+ │ rich_progress_bar:
+ │ _target_: lightning.pytorch.callbacks.RichProgressBar
+
+ ├── logger
+ │ └── tensorboard:
+ │ _target_: lightning.pytorch.loggers.tensorboard.TensorBoardLogger
+ │ save_dir: /notebooks/Matcha-TTS/logs/train/ja004/runs/2024-09-09_09-40-19/tensorboard/
+ │ name: null
+ │ log_graph: false
+ │ default_hp_metric: true
+ │ prefix: ''
+
+ ├── trainer
+ │ └── _target_: lightning.pytorch.trainer.Trainer
+ │ default_root_dir: /notebooks/Matcha-TTS/logs/train/ja004/runs/2024-09-09_09-40-19
+ │ max_epochs: -1
+ │ accelerator: gpu
+ │ devices:
+ │ - 0
+ │ precision: 16-mixed
+ │ check_val_every_n_epoch: 1
+ │ deterministic: false
+ │ gradient_clip_val: 5.0
+
+ ├── paths
+ │ └── root_dir: /notebooks/Matcha-TTS
+ │ data_dir: /notebooks/Matcha-TTS/data/
+ │ log_dir: /notebooks/Matcha-TTS/logs/
+ │ output_dir: /notebooks/Matcha-TTS/logs/train/ja004/runs/2024-09-09_09-40-19
+ │ work_dir: /notebooks/Matcha-TTS
+
+ ├── extras
+ │ └── ignore_warnings: false
+ │ enforce_tags: true
+ │ print_config: true
+
+ ├── task_name
+ │ └── train
+ ├── run_name
+ │ └── ja004
+ ├── tags
+ │ └── ['ja004']
+ ├── train
+ │ └── True
+ ├── test
+ │ └── True
+ ├── ckpt_path
+ │ └── datas/ja004/checkpoint_epoch=4999.ckpt
+ └── seed
+ └── 1234
runs/2024-09-09_09-40-19/tags.log ADDED
@@ -0,0 +1 @@
+ ['ja004']
runs/2024-09-09_09-40-19/tensorboard/version_0/events.out.tfevents.1725874820.nap53itixi.3382.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:250ab1209d797297d065a01dffd890916761e4a023007b3025fbb2da52608a0e
+ size 20068576
runs/2024-09-09_09-40-19/tensorboard/version_0/hparams.yaml ADDED
@@ -0,0 +1,113 @@
+ model:
+   _target_: matcha.models.matcha_tts.MatchaTTS
+   n_vocab: 178
+   n_spks: ${data.n_spks}
+   spk_emb_dim: 64
+   n_feats: 80
+   data_statistics: ${data.data_statistics}
+   out_size: null
+   prior_loss: true
+   encoder:
+     encoder_type: RoPE Encoder
+     encoder_params:
+       n_feats: ${model.n_feats}
+       n_channels: 192
+       filter_channels: 768
+       filter_channels_dp: 256
+       n_heads: 2
+       n_layers: 6
+       kernel_size: 3
+       p_dropout: 0.1
+       spk_emb_dim: 64
+       n_spks: 1
+       prenet: true
+     duration_predictor_params:
+       filter_channels_dp: ${model.encoder.encoder_params.filter_channels_dp}
+       kernel_size: 3
+       p_dropout: ${model.encoder.encoder_params.p_dropout}
+   decoder:
+     channels:
+     - 256
+     - 256
+     dropout: 0.05
+     attention_head_dim: 64
+     n_blocks: 1
+     num_mid_blocks: 2
+     num_heads: 2
+     act_fn: snakebeta
+   cfm:
+     name: CFM
+     solver: euler
+     sigma_min: 0.0001
+   optimizer:
+     _target_: torch.optim.Adam
+     _partial_: true
+     lr: 0.0001
+     weight_decay: 0.0
+ model/params/total: 20856993
+ model/params/trainable: 20856993
+ model/params/non_trainable: 0
+ data:
+   _target_: matcha.data.text_mel_datamodule.TextMelDataModule
+   name: cvm001
+   train_filelist_path: datas/ja004/stage5_train.txt
+   valid_filelist_path: datas/ja004/stage5_valid.txt
+   batch_size: 90
+   num_workers: 2
+   pin_memory: true
+   cleaners:
+   - basic_cleaners2
+   add_blank: true
+   n_spks: 100
+   n_fft: 1024
+   n_feats: 80
+   sample_rate: 22050
+   hop_length: 256
+   win_length: 1024
+   f_min: 0
+   f_max: 8000
+   data_statistics:
+     mel_mean: -5.714534282684326
+     mel_std: 2.280709743499756
+   seed: ${seed}
+ trainer:
+   _target_: lightning.pytorch.trainer.Trainer
+   default_root_dir: ${paths.output_dir}
+   max_epochs: -1
+   accelerator: gpu
+   devices:
+   - 0
+   precision: 16-mixed
+   check_val_every_n_epoch: 1
+   deterministic: false
+   gradient_clip_val: 5.0
+ callbacks:
+   model_checkpoint:
+     _target_: lightning.pytorch.callbacks.ModelCheckpoint
+     dirpath: ${paths.output_dir}/checkpoints
+     filename: checkpoint_{epoch:03d}
+     monitor: epoch
+     verbose: false
+     save_last: true
+     save_top_k: 25
+     mode: max
+     auto_insert_metric_name: true
+     save_weights_only: false
+     every_n_train_steps: null
+     train_time_interval: null
+     every_n_epochs: 50
+     save_on_train_epoch_end: null
+   model_summary:
+     _target_: lightning.pytorch.callbacks.RichModelSummary
+     max_depth: 3
+   rich_progress_bar:
+     _target_: lightning.pytorch.callbacks.RichProgressBar
+ extras:
+   ignore_warnings: false
+   enforce_tags: true
+   print_config: true
+ task_name: train
+ tags:
+ - ja004
+ ckpt_path: datas/ja004/checkpoint_epoch=4999.ckpt
+ seed: 1234
runs/2024-09-09_09-40-19/train.log ADDED
@@ -0,0 +1,75 @@
+ [2024-09-09 09:40:19,298][matcha.utils.utils][INFO] - Enforcing tags! <cfg.extras.enforce_tags=True>
+ [2024-09-09 09:40:19,303][matcha.utils.utils][INFO] - Printing config tree with Rich! <cfg.extras.print_config=True>
+ [2024-09-09 09:40:19,354][__main__][INFO] - Instantiating datamodule <matcha.data.text_mel_datamodule.TextMelDataModule>
+ [2024-09-09 09:40:20,137][__main__][INFO] - Instantiating model <matcha.models.matcha_tts.MatchaTTS>
+ [2024-09-09 09:40:20,427][__main__][INFO] - Instantiating callbacks...
+ [2024-09-09 09:40:20,428][matcha.utils.instantiators][INFO] - Instantiating callback <lightning.pytorch.callbacks.ModelCheckpoint>
+ [2024-09-09 09:40:20,431][matcha.utils.instantiators][INFO] - Instantiating callback <lightning.pytorch.callbacks.RichModelSummary>
+ [2024-09-09 09:40:20,431][matcha.utils.instantiators][INFO] - Instantiating callback <lightning.pytorch.callbacks.RichProgressBar>
+ [2024-09-09 09:40:20,432][__main__][INFO] - Instantiating loggers...
+ [2024-09-09 09:40:20,432][matcha.utils.instantiators][INFO] - Instantiating logger <lightning.pytorch.loggers.tensorboard.TensorBoardLogger>
+ [2024-09-09 09:40:20,434][__main__][INFO] - Instantiating trainer <lightning.pytorch.trainer.Trainer>
+ [2024-09-09 09:40:20,462][__main__][INFO] - Logging hyperparameters!
+ [2024-09-09 09:40:20,502][__main__][INFO] - Starting training!
+ [2024-09-09 11:26:23,651][matcha.utils.utils][ERROR] -
+ Traceback (most recent call last):
+ File "/notebooks/Matcha-TTS/matcha/utils/utils.py", line 76, in wrap
+ metric_dict, object_dict = task_func(cfg=cfg)
+ File "/notebooks/Matcha-TTS/matcha/train.py", line 79, in train
+ trainer.fit(model=model, datamodule=datamodule, ckpt_path=cfg.get("ckpt_path"))
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py", line 538, in fit
+ call._call_and_handle_interrupt(
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/trainer/call.py", line 47, in _call_and_handle_interrupt
+ return trainer_fn(*args, **kwargs)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py", line 574, in _fit_impl
+ self._run(model, ckpt_path=ckpt_path)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py", line 981, in _run
+ results = self._run_stage()
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py", line 1025, in _run_stage
+ self.fit_loop.run()
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/loops/fit_loop.py", line 205, in run
+ self.advance()
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/loops/fit_loop.py", line 363, in advance
+ self.epoch_loop.run(self._data_fetcher)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/loops/training_epoch_loop.py", line 140, in run
+ self.advance(data_fetcher)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/loops/training_epoch_loop.py", line 250, in advance
+ batch_output = self.automatic_optimization.run(trainer.optimizers[0], batch_idx, kwargs)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py", line 190, in run
+ self._optimizer_step(batch_idx, closure)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py", line 268, in _optimizer_step
+ call._call_lightning_module_hook(
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/trainer/call.py", line 167, in _call_lightning_module_hook
+ output = fn(*args, **kwargs)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/core/module.py", line 1306, in optimizer_step
+ optimizer.step(closure=optimizer_closure)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/core/optimizer.py", line 153, in step
+ step_output = self._strategy.optimizer_step(self._optimizer, closure, **kwargs)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/strategies/strategy.py", line 238, in optimizer_step
+ return self.precision_plugin.optimizer_step(optimizer, model=model, closure=closure, **kwargs)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/plugins/precision/amp.py", line 78, in optimizer_step
+ closure_result = closure()
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py", line 144, in __call__
+ self._result = self.closure(*args, **kwargs)
+ File "/usr/local/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
+ return func(*args, **kwargs)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py", line 138, in closure
+ self._backward_fn(step_output.closure_loss)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py", line 239, in backward_fn
+ call._call_strategy_hook(self.trainer, "backward", loss, optimizer)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/trainer/call.py", line 319, in _call_strategy_hook
+ output = fn(*args, **kwargs)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/strategies/strategy.py", line 212, in backward
+ self.precision_plugin.backward(closure_loss, self.lightning_module, optimizer, *args, **kwargs)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/plugins/precision/precision.py", line 72, in backward
+ model.backward(tensor, *args, **kwargs)
+ File "/usr/local/lib/python3.10/site-packages/lightning/pytorch/core/module.py", line 1101, in backward
+ loss.backward(*args, **kwargs)
+ File "/usr/local/lib/python3.10/site-packages/torch/_tensor.py", line 521, in backward
+ torch.autograd.backward(
+ File "/usr/local/lib/python3.10/site-packages/torch/autograd/__init__.py", line 289, in backward
+ _engine_run_backward(
+ File "/usr/local/lib/python3.10/site-packages/torch/autograd/graph.py", line 769, in _engine_run_backward
+ return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
+ torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 294.00 MiB. GPU 0 has a total capacity of 15.73 GiB of which 43.12 MiB is free. Process 3361396 has 15.69 GiB memory in use. Of the allocated memory 13.75 GiB is allocated by PyTorch, and 1.72 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+ [2024-09-09 11:26:23,658][matcha.utils.utils][INFO] - Output dir: /notebooks/Matcha-TTS/logs/train/ja004/runs/2024-09-09_09-40-19
runs/2024-09-09_11-41-15/.hydra/config.yaml ADDED
@@ -0,0 +1,127 @@
+ task_name: train
+ run_name: ja004
+ tags:
+ - ja004
+ train: true
+ test: true
+ ckpt_path: logs/train/ja004/runs/2024-09-09_09-40-19/checkpoints/last.ckpt
+ seed: 1234
+ data:
+   _target_: matcha.data.text_mel_datamodule.TextMelDataModule
+   name: cvm001
+   train_filelist_path: datas/ja004/stage5_train.txt
+   valid_filelist_path: datas/ja004/stage5_valid.txt
+   batch_size: 80
+   num_workers: 2
+   pin_memory: true
+   cleaners:
+   - basic_cleaners2
+   add_blank: true
+   n_spks: 100
+   n_fft: 1024
+   n_feats: 80
+   sample_rate: 22050
+   hop_length: 256
+   win_length: 1024
+   f_min: 0
+   f_max: 8000
+   data_statistics:
+     mel_mean: -5.714534282684326
+     mel_std: 2.280709743499756
+   seed: ${seed}
+ model:
+   _target_: matcha.models.matcha_tts.MatchaTTS
+   n_vocab: 178
+   n_spks: ${data.n_spks}
+   spk_emb_dim: 64
+   n_feats: 80
+   data_statistics: ${data.data_statistics}
+   out_size: null
+   prior_loss: true
+   encoder:
+     encoder_type: RoPE Encoder
+     encoder_params:
+       n_feats: ${model.n_feats}
+       n_channels: 192
+       filter_channels: 768
+       filter_channels_dp: 256
+       n_heads: 2
+       n_layers: 6
+       kernel_size: 3
+       p_dropout: 0.1
+       spk_emb_dim: 64
+       n_spks: 1
+       prenet: true
+     duration_predictor_params:
+       filter_channels_dp: ${model.encoder.encoder_params.filter_channels_dp}
+       kernel_size: 3
+       p_dropout: ${model.encoder.encoder_params.p_dropout}
+   decoder:
+     channels:
+     - 256
+     - 256
+     dropout: 0.05
+     attention_head_dim: 64
+     n_blocks: 1
+     num_mid_blocks: 2
+     num_heads: 2
+     act_fn: snakebeta
+   cfm:
+     name: CFM
+     solver: euler
+     sigma_min: 0.0001
+   optimizer:
+     _target_: torch.optim.Adam
+     _partial_: true
+     lr: 0.0001
+     weight_decay: 0.0
+ callbacks:
+   model_checkpoint:
+     _target_: lightning.pytorch.callbacks.ModelCheckpoint
+     dirpath: ${paths.output_dir}/checkpoints
+     filename: checkpoint_{epoch:03d}
+     monitor: epoch
+     verbose: false
+     save_last: true
+     save_top_k: 25
+     mode: max
+     auto_insert_metric_name: true
+     save_weights_only: false
+     every_n_train_steps: null
+     train_time_interval: null
+     every_n_epochs: 50
+     save_on_train_epoch_end: null
+   model_summary:
+     _target_: lightning.pytorch.callbacks.RichModelSummary
+     max_depth: 3
+   rich_progress_bar:
+     _target_: lightning.pytorch.callbacks.RichProgressBar
+ logger:
+   tensorboard:
+     _target_: lightning.pytorch.loggers.tensorboard.TensorBoardLogger
+     save_dir: ${paths.output_dir}/tensorboard/
+     name: null
+     log_graph: false
+     default_hp_metric: true
+     prefix: ''
+ trainer:
+   _target_: lightning.pytorch.trainer.Trainer
+   default_root_dir: ${paths.output_dir}
+   max_epochs: -1
+   accelerator: gpu
+   devices:
+   - 0
+   precision: 16-mixed
+   check_val_every_n_epoch: 1
+   deterministic: false
+   gradient_clip_val: 5.0
+ paths:
+   root_dir: ${oc.env:PROJECT_ROOT}
+   data_dir: ${paths.root_dir}/data/
+   log_dir: ${paths.root_dir}/logs/
+   output_dir: ${hydra:runtime.output_dir}
+   work_dir: ${hydra:runtime.cwd}
+ extras:
+   ignore_warnings: false
+   enforce_tags: true
+   print_config: true
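Note: relative to the first run's config above, only two values change here: data.batch_size drops from 90 to 80, and ckpt_path now points at the previous run's last.ckpt, i.e. training resumes after the CUDA out-of-memory failure recorded in runs/2024-09-09_09-40-19/train.log. A small sketch that prints exactly that difference, assuming omegaconf is installed and both config files are checked out:

```python
# Sketch: print the two settings that differ between the failed run and this
# resumed run, straight from the .hydra/config.yaml files added in this commit.
from omegaconf import OmegaConf

first = OmegaConf.load("runs/2024-09-09_09-40-19/.hydra/config.yaml")
second = OmegaConf.load("runs/2024-09-09_11-41-15/.hydra/config.yaml")

for key in ("data.batch_size", "ckpt_path"):
    print(f"{key}: {OmegaConf.select(first, key)} -> {OmegaConf.select(second, key)}")
# data.batch_size: 90 -> 80
# ckpt_path: datas/ja004/checkpoint_epoch=4999.ckpt
#         -> logs/train/ja004/runs/2024-09-09_09-40-19/checkpoints/last.ckpt
```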
runs/2024-09-09_11-41-15/.hydra/hydra.yaml ADDED
@@ -0,0 +1,182 @@
+ hydra:
+   run:
+     dir: ${paths.log_dir}/${task_name}/${run_name}/runs/${now:%Y-%m-%d}_${now:%H-%M-%S}
+   sweep:
+     dir: ${paths.log_dir}/${task_name}/${run_name}/multiruns/${now:%Y-%m-%d}_${now:%H-%M-%S}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params: null
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: RUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=RUN
+     task:
+     - experiment=ja004
+   job:
+     name: train
+     chdir: null
+     override_dirname: experiment=ja004
+     id: ???
+     num: ???
+     config_name: train.yaml
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /notebooks/Matcha-TTS
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: /notebooks/Matcha-TTS/configs
+       schema: file
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /notebooks/Matcha-TTS/logs/train/ja004/runs/2024-09-09_11-41-15
+     choices:
+       debug: null
+       local: default
+       hparams_search: null
+       experiment: ja004
+       hydra: default
+       extras: default
+       paths: default
+       trainer: default
+       logger: tensorboard
+       callbacks: default
+       model: matcha
+       model/optimizer: adam.yaml
+       model/cfm: default.yaml
+       model/decoder: default.yaml
+       model/encoder: default.yaml
+       data: ja004.yaml
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: colorlog
+       hydra/hydra_logging: colorlog
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
runs/2024-09-09_11-41-15/.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
+ - experiment=ja004
runs/2024-09-09_11-41-15/checkpoints/checkpoint_epoch=5099.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55d2c595fdea6505359713667d3390e93440e78974cd492c323627c4580adb89
+ size 250674630
runs/2024-09-09_11-41-15/checkpoints/checkpoint_epoch=5149.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b62a1c464e17f1b060eb53b82afd423254ce158ffcf0d22093124a69c9fd093e
+ size 250675013
runs/2024-09-09_11-41-15/checkpoints/checkpoint_epoch=5199.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ceb0bf3962ecff77de6496758a69e19e92121205436ae0728ac0c77d31018f7
+ size 250675396
runs/2024-09-09_11-41-15/checkpoints/last.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8da4cbd2c372cf236b311bf7f647e29a515fc24514e163b2086f9cfccfdce5cb
+ size 250675396
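Note: the .ckpt entries above are Git LFS pointers of roughly 250 MB each, not the weights themselves; after a `git lfs pull` they are ordinary PyTorch Lightning checkpoints. A hedged sketch of inspecting one; the key names are the usual Lightning ones and are assumed rather than read from this commit:

```python
# Sketch: inspect a downloaded checkpoint. Assumes `git lfs pull` has replaced
# the pointer file with the real checkpoint and that torch is installed.
import torch

ckpt = torch.load(
    "runs/2024-09-09_11-41-15/checkpoints/last.ckpt",
    map_location="cpu",
)
# Standard Lightning bookkeeping keys (assumed, not read from this commit):
print(ckpt["epoch"], ckpt["global_step"])
# Parameter count should match hparams.yaml's model/params/total: 20856993
print(sum(p.numel() for p in ckpt["state_dict"].values()))
```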
runs/2024-09-09_11-41-15/config_tree.log ADDED
@@ -0,0 +1,143 @@
+ CONFIG
+ ├── data
+ │ └── _target_: matcha.data.text_mel_datamodule.TextMelDataModule
+ │ name: cvm001
+ │ train_filelist_path: datas/ja004/stage5_train.txt
+ │ valid_filelist_path: datas/ja004/stage5_valid.txt
+ │ batch_size: 80
+ │ num_workers: 2
+ │ pin_memory: true
+ │ cleaners:
+ │ - basic_cleaners2
+ │ add_blank: true
+ │ n_spks: 100
+ │ n_fft: 1024
+ │ n_feats: 80
+ │ sample_rate: 22050
+ │ hop_length: 256
+ │ win_length: 1024
+ │ f_min: 0
+ │ f_max: 8000
+ │ data_statistics:
+ │ mel_mean: -5.714534282684326
+ │ mel_std: 2.280709743499756
+ │ seed: 1234
+
+ ├── model
+ │ └── _target_: matcha.models.matcha_tts.MatchaTTS
+ │ n_vocab: 178
+ │ n_spks: 100
+ │ spk_emb_dim: 64
+ │ n_feats: 80
+ │ data_statistics:
+ │ mel_mean: -5.714534282684326
+ │ mel_std: 2.280709743499756
+ │ out_size: null
+ │ prior_loss: true
+ │ encoder:
+ │ encoder_type: RoPE Encoder
+ │ encoder_params:
+ │ n_feats: 80
+ │ n_channels: 192
+ │ filter_channels: 768
+ │ filter_channels_dp: 256
+ │ n_heads: 2
+ │ n_layers: 6
+ │ kernel_size: 3
+ │ p_dropout: 0.1
+ │ spk_emb_dim: 64
+ │ n_spks: 1
+ │ prenet: true
+ │ duration_predictor_params:
+ │ filter_channels_dp: 256
+ │ kernel_size: 3
+ │ p_dropout: 0.1
+ │ decoder:
+ │ channels:
+ │ - 256
+ │ - 256
+ │ dropout: 0.05
+ │ attention_head_dim: 64
+ │ n_blocks: 1
+ │ num_mid_blocks: 2
+ │ num_heads: 2
+ │ act_fn: snakebeta
+ │ cfm:
+ │ name: CFM
+ │ solver: euler
+ │ sigma_min: 0.0001
+ │ optimizer:
+ │ _target_: torch.optim.Adam
+ │ _partial_: true
+ │ lr: 0.0001
+ │ weight_decay: 0.0
+
+ ├── callbacks
+ │ └── model_checkpoint:
+ │ _target_: lightning.pytorch.callbacks.ModelCheckpoint
+ │ dirpath: /notebooks/Matcha-TTS/logs/train/ja004/runs/2024-09-09_11-41-15/checkpoints
+ │ filename: checkpoint_{epoch:03d}
+ │ monitor: epoch
+ │ verbose: false
+ │ save_last: true
+ │ save_top_k: 25
+ │ mode: max
+ │ auto_insert_metric_name: true
+ │ save_weights_only: false
+ │ every_n_train_steps: null
+ │ train_time_interval: null
+ │ every_n_epochs: 50
+ │ save_on_train_epoch_end: null
+ │ model_summary:
+ │ _target_: lightning.pytorch.callbacks.RichModelSummary
+ │ max_depth: 3
+ │ rich_progress_bar:
+ │ _target_: lightning.pytorch.callbacks.RichProgressBar
+
+ ├── logger
+ │ └── tensorboard:
+ │ _target_: lightning.pytorch.loggers.tensorboard.TensorBoardLogger
+ │ save_dir: /notebooks/Matcha-TTS/logs/train/ja004/runs/2024-09-09_11-41-15/tensorboard/
+ │ name: null
+ │ log_graph: false
+ │ default_hp_metric: true
+ │ prefix: ''
+
+ ├── trainer
+ │ └── _target_: lightning.pytorch.trainer.Trainer
+ │ default_root_dir: /notebooks/Matcha-TTS/logs/train/ja004/runs/2024-09-09_11-41-15
+ │ max_epochs: -1
+ │ accelerator: gpu
+ │ devices:
+ │ - 0
+ │ precision: 16-mixed
+ │ check_val_every_n_epoch: 1
+ │ deterministic: false
+ │ gradient_clip_val: 5.0
+
+ ├── paths
+ │ └── root_dir: /notebooks/Matcha-TTS
+ │ data_dir: /notebooks/Matcha-TTS/data/
+ │ log_dir: /notebooks/Matcha-TTS/logs/
+ │ output_dir: /notebooks/Matcha-TTS/logs/train/ja004/runs/2024-09-09_11-41-15
+ │ work_dir: /notebooks/Matcha-TTS
+
+ ├── extras
+ │ └── ignore_warnings: false
+ │ enforce_tags: true
+ │ print_config: true
+
+ ├── task_name
+ │ └── train
+ ├── run_name
+ │ └── ja004
+ ├── tags
+ │ └── ['ja004']
+ ├── train
+ │ └── True
+ ├── test
+ │ └── True
+ ├── ckpt_path
+ │ └── logs/train/ja004/runs/2024-09-09_09-40-19/checkpoints/last.ckpt
+ └── seed
+ └── 1234
runs/2024-09-09_11-41-15/tags.log ADDED
@@ -0,0 +1 @@
+ ['ja004']
runs/2024-09-09_11-41-15/tensorboard/version_0/events.out.tfevents.1725882077.nap53itixi.6727.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd6ae645c709f786404b65d5481b80f20084ea1fc7be6992b561ea48b9f10d73
+ size 38452592
runs/2024-09-09_11-41-15/tensorboard/version_0/hparams.yaml ADDED
@@ -0,0 +1,113 @@
+ model:
+   _target_: matcha.models.matcha_tts.MatchaTTS
+   n_vocab: 178
+   n_spks: ${data.n_spks}
+   spk_emb_dim: 64
+   n_feats: 80
+   data_statistics: ${data.data_statistics}
+   out_size: null
+   prior_loss: true
+   encoder:
+     encoder_type: RoPE Encoder
+     encoder_params:
+       n_feats: ${model.n_feats}
+       n_channels: 192
+       filter_channels: 768
+       filter_channels_dp: 256
+       n_heads: 2
+       n_layers: 6
+       kernel_size: 3
+       p_dropout: 0.1
+       spk_emb_dim: 64
+       n_spks: 1
+       prenet: true
+     duration_predictor_params:
+       filter_channels_dp: ${model.encoder.encoder_params.filter_channels_dp}
+       kernel_size: 3
+       p_dropout: ${model.encoder.encoder_params.p_dropout}
+   decoder:
+     channels:
+     - 256
+     - 256
+     dropout: 0.05
+     attention_head_dim: 64
+     n_blocks: 1
+     num_mid_blocks: 2
+     num_heads: 2
+     act_fn: snakebeta
+   cfm:
+     name: CFM
+     solver: euler
+     sigma_min: 0.0001
+   optimizer:
+     _target_: torch.optim.Adam
+     _partial_: true
+     lr: 0.0001
+     weight_decay: 0.0
+ model/params/total: 20856993
+ model/params/trainable: 20856993
+ model/params/non_trainable: 0
+ data:
+   _target_: matcha.data.text_mel_datamodule.TextMelDataModule
+   name: cvm001
+   train_filelist_path: datas/ja004/stage5_train.txt
+   valid_filelist_path: datas/ja004/stage5_valid.txt
+   batch_size: 80
+   num_workers: 2
+   pin_memory: true
+   cleaners:
+   - basic_cleaners2
+   add_blank: true
+   n_spks: 100
+   n_fft: 1024
+   n_feats: 80
+   sample_rate: 22050
+   hop_length: 256
+   win_length: 1024
+   f_min: 0
+   f_max: 8000
+   data_statistics:
+     mel_mean: -5.714534282684326
+     mel_std: 2.280709743499756
+   seed: ${seed}
+ trainer:
+   _target_: lightning.pytorch.trainer.Trainer
+   default_root_dir: ${paths.output_dir}
+   max_epochs: -1
+   accelerator: gpu
+   devices:
+   - 0
+   precision: 16-mixed
+   check_val_every_n_epoch: 1
+   deterministic: false
+   gradient_clip_val: 5.0
+ callbacks:
+   model_checkpoint:
+     _target_: lightning.pytorch.callbacks.ModelCheckpoint
+     dirpath: ${paths.output_dir}/checkpoints
+     filename: checkpoint_{epoch:03d}
+     monitor: epoch
+     verbose: false
+     save_last: true
+     save_top_k: 25
+     mode: max
+     auto_insert_metric_name: true
+     save_weights_only: false
+     every_n_train_steps: null
+     train_time_interval: null
+     every_n_epochs: 50
+     save_on_train_epoch_end: null
+   model_summary:
+     _target_: lightning.pytorch.callbacks.RichModelSummary
+     max_depth: 3
+   rich_progress_bar:
+     _target_: lightning.pytorch.callbacks.RichProgressBar
+ extras:
+   ignore_warnings: false
+   enforce_tags: true
+   print_config: true
+ task_name: train
+ tags:
+ - ja004
+ ckpt_path: logs/train/ja004/runs/2024-09-09_09-40-19/checkpoints/last.ckpt
+ seed: 1234
runs/2024-09-09_11-41-15/train.log ADDED
@@ -0,0 +1,13 @@
+ [2024-09-09 11:41:15,996][matcha.utils.utils][INFO] - Enforcing tags! <cfg.extras.enforce_tags=True>
+ [2024-09-09 11:41:16,000][matcha.utils.utils][INFO] - Printing config tree with Rich! <cfg.extras.print_config=True>
+ [2024-09-09 11:41:16,054][__main__][INFO] - Instantiating datamodule <matcha.data.text_mel_datamodule.TextMelDataModule>
+ [2024-09-09 11:41:16,938][__main__][INFO] - Instantiating model <matcha.models.matcha_tts.MatchaTTS>
+ [2024-09-09 11:41:17,281][__main__][INFO] - Instantiating callbacks...
+ [2024-09-09 11:41:17,282][matcha.utils.instantiators][INFO] - Instantiating callback <lightning.pytorch.callbacks.ModelCheckpoint>
+ [2024-09-09 11:41:17,285][matcha.utils.instantiators][INFO] - Instantiating callback <lightning.pytorch.callbacks.RichModelSummary>
+ [2024-09-09 11:41:17,285][matcha.utils.instantiators][INFO] - Instantiating callback <lightning.pytorch.callbacks.RichProgressBar>
+ [2024-09-09 11:41:17,286][__main__][INFO] - Instantiating loggers...
+ [2024-09-09 11:41:17,286][matcha.utils.instantiators][INFO] - Instantiating logger <lightning.pytorch.loggers.tensorboard.TensorBoardLogger>
+ [2024-09-09 11:41:17,289][__main__][INFO] - Instantiating trainer <lightning.pytorch.trainer.Trainer>
+ [2024-09-09 11:41:17,318][__main__][INFO] - Logging hyperparameters!
+ [2024-09-09 11:41:17,361][__main__][INFO] - Starting training!