blackbartblues committed on
Commit
6db05fb
·
verified ·
1 Parent(s): 347fa34

Add hparams.yaml for training reproducibility

Browse files
Files changed (1) hide show
  1. hparams.yaml +122 -0
hparams.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ accelerator: gpu
2
+ accumulate_grad_batches: null
3
+ amp_backend: null
4
+ amp_level: null
5
+ auto_lr_find: false
6
+ auto_scale_batch_size: false
7
+ auto_select_gpus: null
8
+ batch_size: 8
9
+ benchmark: null
10
+ betas: !!python/tuple
11
+ - 0.8
12
+ - 0.99
13
+ c_kl: 1.0
14
+ c_mel: 45
15
+ channels: 1
16
+ check_val_every_n_epoch: 1
17
+ checkpoint_epochs: 10
18
+ dataset:
19
+ - !!python/object/apply:pathlib.PosixPath
20
+ - /home/blacku/Projekt Orion/training/
21
+ - dataset.jsonl
22
+ dataset_dir: &id001 !!python/object/apply:pathlib.PosixPath
23
+ - /home/blacku/Projekt Orion/training/
24
+ default_root_dir: *id001
25
+ detect_anomaly: false
26
+ devices: '1'
27
+ enable_checkpointing: true
28
+ enable_model_summary: true
29
+ enable_progress_bar: true
30
+ eps: 1.0e-09
31
+ fast_dev_run: false
32
+ filter_channels: 768
33
+ filter_length: 1024
34
+ gin_channels: 0
35
+ gpus: null
36
+ grad_clip: null
37
+ gradient_clip_algorithm: null
38
+ gradient_clip_val: null
39
+ hidden_channels: 192
40
+ hop_length: 256
41
+ inference_mode: true
42
+ init_lr_ratio: 1.0
43
+ inter_channels: 192
44
+ ipus: null
45
+ kernel_size: 3
46
+ learning_rate: 0.0002
47
+ limit_predict_batches: null
48
+ limit_test_batches: null
49
+ limit_train_batches: 40
50
+ limit_val_batches: null
51
+ log_every_n_steps: 50
52
+ logger: true
53
+ lr_decay: 0.999875
54
+ max_epochs: 6000
55
+ max_phoneme_ids: null
56
+ max_steps: -1
57
+ max_time: null
58
+ mel_channels: 80
59
+ mel_fmax: null
60
+ mel_fmin: 0.0
61
+ min_epochs: null
62
+ min_steps: null
63
+ move_metrics_to_cpu: false
64
+ multiple_trainloader_mode: max_size_cycle
65
+ n_heads: 2
66
+ n_layers: 6
67
+ n_layers_q: 3
68
+ num_nodes: 1
69
+ num_processes: null
70
+ num_sanity_val_steps: 2
71
+ num_speakers: 1
72
+ num_symbols: 256
73
+ num_test_examples: 0
74
+ num_workers: 1
75
+ overfit_batches: 0.0
76
+ p_dropout: 0.1
77
+ plugins: null
78
+ precision: 32
79
+ profiler: null
80
+ quality: medium
81
+ reload_dataloaders_every_n_epochs: 0
82
+ replace_sampler_ddp: true
83
+ resblock: '2'
84
+ resblock_dilation_sizes: !!python/tuple
85
+ - !!python/tuple
86
+ - 1
87
+ - 2
88
+ - !!python/tuple
89
+ - 2
90
+ - 6
91
+ - !!python/tuple
92
+ - 3
93
+ - 12
94
+ resblock_kernel_sizes: !!python/tuple
95
+ - 3
96
+ - 5
97
+ - 7
98
+ resume_from_checkpoint: /home/blacku/Projekt Orion/training/lightning_logs/diuna/checkpoints/epoch=5959-step=1538360.ckpt
99
+ resume_from_single_speaker_checkpoint: null
100
+ sample_bytes: 2
101
+ sample_rate: 22050
102
+ seed: 1234
103
+ segment_size: 8192
104
+ strategy: null
105
+ sync_batchnorm: false
106
+ tpu_cores: null
107
+ track_grad_norm: -1
108
+ upsample_initial_channel: 256
109
+ upsample_kernel_sizes: !!python/tuple
110
+ - 16
111
+ - 16
112
+ - 8
113
+ upsample_rates: !!python/tuple
114
+ - 8
115
+ - 8
116
+ - 4
117
+ use_sdp: true
118
+ use_spectral_norm: false
119
+ val_check_interval: null
120
+ validation_split: 0.0
121
+ warmup_epochs: 0
122
+ win_length: 1024