---
# Experiment bookkeeping: all outputs (checkpoints, logs) are placed under
# ${exp.dir}/${exp.name} via interpolation in later sections of this file.
exp:
  dir: ./exps       # root output directory (relative to the working dir)
  name: oggrestore  # experiment name; referenced elsewhere as ${exp.name}
# Data module. `_target_` is a dotted import path, presumably instantiated by
# a Hydra/omegaconf-style factory -- verify against the training entrypoint.
datas:
  _target_: look2hear.datas.DataModule
  dataset_type: 1    # numeric mode code; meaning defined by DataModule -- TODO confirm
  sr: 44100          # sample rate (Hz); matches model.sr below
  segments: 4        # presumably segment length in seconds -- confirm in DataModule
  num_steps: 1000
  batch_size: 1
  num_workers: 0     # single-process data loading
  pin_memory: true
  stems:
    original: original  # clean/reference stem name
    codec: codec        # codec-degraded stem name (ogg-encoded, per formats below)
  train:
    dir:
    - train             # training subdirectory name(s)
    original_format: wav
    codec_format: ogg
  valid:
    dir:
    # NOTE(review): "vaild" looks like a typo for "valid" -- confirm whether
    # the directory on disk is really named "vaild" before renaming either
    # the config value or the directory.
    - vaild
    original_format: wav
    codec_format: ogg
# Generator: the Apollo restoration model.
model:
  _target_: look2hear.models.apollo.Apollo
  sr: 44100        # must agree with datas.sr
  win: 20          # window size; units (ms vs. samples) defined by Apollo -- TODO confirm
  feature_dim: 64  # internal feature dimension
  layer: 4         # number of layers
# GAN critic: multi-resolution frequency-domain discriminator.
discriminator:
  _target_: look2hear.discriminators.frequencydis.MultiFrequencyDiscriminator
  nch: 2      # presumably input channels (stereo audio) -- confirm
  window:     # analysis window sizes; one discriminator branch per resolution
  - 32
  - 64
  - 128
  - 256
  - 512
  - 1024
  - 2048
# Separate AdamW optimizers for the generator (g) and discriminator (d).
# Note the deliberate asymmetry: the discriminator trains at 10x lower lr
# and overrides betas to (0.5, 0.99) -- a common GAN setting -- while the
# generator keeps AdamW's default betas.
optimizer_g:
  _target_: torch.optim.AdamW
  lr: 0.001
  weight_decay: 0.01
optimizer_d:
  _target_: torch.optim.AdamW
  lr: 0.0001
  weight_decay: 0.01
  betas:
  - 0.5
  - 0.99
# Identical StepLR decay for both optimizers: multiply lr by 0.98 every
# 2 steps. The step unit depends on how the LightningModule calls
# scheduler.step() -- typically once per epoch; confirm in the system class.
scheduler_g:
  _target_: torch.optim.lr_scheduler.StepLR
  step_size: 2
  gamma: 0.98
scheduler_d:
  _target_: torch.optim.lr_scheduler.StepLR
  step_size: 2
  gamma: 0.98
# Paired GAN losses (generator / discriminator) plus a validation metric.
loss_g:
  _target_: look2hear.losses.gan_losses.MultiFrequencyGenLoss
  eps: 1.0e-08   # numerical-stability epsilon
loss_d:
  _target_: look2hear.losses.gan_losses.MultiFrequencyDisLoss
  eps: 1.0e-08
metrics:
  _target_: look2hear.losses.MultiSrcNegSDR
  sdr_type: sisdr   # scale-invariant SDR variant
# LightningModule wrapper that wires model/losses/optimizers together.
system:
  _target_: look2hear.system.audio_litmodule.AudioLightningModule
# Stop training if val_loss has not improved for 50 consecutive checks.
early_stopping:
  _target_: pytorch_lightning.callbacks.EarlyStopping
  monitor: val_loss
  patience: 50
  mode: min        # lower val_loss is better
  verbose: true
# Keep the 10 best checkpoints by val_loss, plus the most recent one.
checkpoint:
  _target_: pytorch_lightning.callbacks.ModelCheckpoint
  dirpath: ${exp.dir}/${exp.name}/checkpoints
  monitor: val_loss
  mode: min
  verbose: true
  save_top_k: 10
  save_last: true
  # Single-quoted because a plain scalar starting with '{' would be parsed
  # as a YAML flow mapping.
  filename: '{epoch}-{val_loss:.4f}'
# Weights & Biases logging (online mode).
logger:
  _target_: pytorch_lightning.loggers.WandbLogger
  name: ${exp.name}
  save_dir: ${exp.dir}/${exp.name}/logs
  offline: false             # set true to log locally without a W&B account
  project: Audio-Restoration
# PyTorch Lightning Trainer: single CUDA device (index 0), bf16 precision.
trainer:
  _target_: pytorch_lightning.Trainer
  devices:
  - 0
  max_epochs: 50
  sync_batchnorm: true       # only relevant for multi-device runs; harmless here
  default_root_dir: ${exp.dir}/${exp.name}/
  accelerator: cuda
  limit_train_batches: 1.0   # use the full training set each epoch
  fast_dev_run: false
  precision: bf16            # bfloat16 mixed precision; requires supporting hardware
  enable_model_summary: true