| |
| prefix: "" |
|
|
# NOTE(review): machine-specific absolute path — presumably the label encoder
# saved by a previous run; consider deriving it from <save_folder> instead. Confirm.
lab_enc_file: /home/m64000/work/IF-MDD/exp_iqra/wavlm_large_None_PhnMonoSSL_ottc_confEnc/save/label_encoder.txt
| ctc_loss_type: "crottc" |
| encoder_type: "conformer" |
|
|
| wandb_project: "iqra_extra" |
| |
| tags: |
| - PhnMonoSSL |
| - crottc |
| - ConformerEncoder |
| - iqra_extra |
| - TTS_FT |
|
|
| |
| pretrained_models_path: pretrained_models/ |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
|
|
|
|
| |
| perceived_ssl_model: "wavlm_large" |
| canonical_ssl_model: Null |
|
|
| |
| ENCODER_DIM: 1024 |
|
|
| |
| feature_fusion: "mono" |
| blend_alpha: 0.5 |
|
|
| |
| |
| |
| data_folder_save: "/home/m64000/work/dataset/data_iqra_extra_is26" |
| train_annotation: !ref <data_folder_save>/iqra_extra_is26_train_aligned.json |
| valid_annotation: !ref <data_folder_save>/iqra_extra_is26_dev_aligned.json |
| test_annotation: !ref <data_folder_save>/iqra_extra_is26_test_aligned.json |
| |
| train_annotation_extra: !ref <data_folder_save>/train-train_with_extra.json |
| use_extra_train_data: False |
|
|
| evaluate_key: "PER" |
| |
| |
| |
| max_save_models: 3 |
| |
| |
|
|
| |
| output_folder: !ref exp_iqra/<perceived_ssl_model>_<canonical_ssl_model>_<feature_fusion>_<prefix> |
| per_file: !ref <output_folder>/per.txt |
| mpd_file: !ref <output_folder>/mpd.txt |
| save_folder: !ref <output_folder>/save |
| train_log: !ref <output_folder>/train_log.txt |
|
|
| on_training_test_wer_folder: !ref <output_folder>/on_training_test_wer |
| on_training_test_mpd_folder: !ref <output_folder>/on_training_test_mpd |
|
|
| |
| training_target: "target" |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
perceived_ssl: !apply:trainer.AutoSSLoader.AutoSSLLoader
    model_name: !ref <perceived_ssl_model>
    freeze: !ref <freeze_perceived_ssl>
    freeze_feature_extractor: !ref <freeze_perceived_feature_extractor>
    save_path: !ref <pretrained_models_path>
    output_all_hiddens: False
    # NOTE(review): "preceived" is a typo for "perceived", but this key must match
    # the AutoSSLLoader keyword argument exactly — confirm the loader's spelling
    # before renaming it here.
    preceived_ssl_emb_layer: -1
|
|
canonical_ssl: !apply:trainer.AutoSSLoader.AutoSSLLoader
    model_name: !ref <canonical_ssl_model>
    freeze: !ref <freeze_canonical_ssl>
    # Fixed: previously referenced <freeze_perceived_feature_extractor>, which left
    # freeze_canonical_feature_extractor (defined below) unused. Both flags are
    # currently True, so this is behavior-identical today.
    freeze_feature_extractor: !ref <freeze_canonical_feature_extractor>
    save_path: !ref <pretrained_models_path>
    output_all_hiddens: False
|
|
| canonical_ssl_emb_layer: -1 |
|
|
| enc: !new:torch.nn.Sequential |
| - !new:speechbrain.lobes.models.VanillaNN.VanillaNN |
| input_shape: [null, null, !ref <ENCODER_DIM>] |
| activation: !ref <activation> |
| dnn_blocks: !ref <dnn_layers> |
| dnn_neurons: !ref <dnn_neurons> |
| - !new:torch.nn.LayerNorm |
| normalized_shape: !ref <dnn_neurons> |
|
|
|
|
| kernel_size: 7 |
| attention_type: "RoPEMHA" |
| ConformerEncoder: !new:speechbrain.lobes.models.transformer.Conformer.ConformerEncoder |
| num_layers: 2 |
| nhead: 8 |
| d_ffn: !ref <dnn_neurons> |
| d_model: !ref <dnn_neurons> |
| dropout: 0.1 |
| kernel_size: !ref <kernel_size> |
| attention_type: !ref <attention_type> |
|
|
| ctc_lin: !new:speechbrain.nnet.linear.Linear |
| input_size: !ref <dnn_neurons> |
| n_neurons: !ref <output_neurons> |
|
|
| |
| lm_weight: !new:speechbrain.nnet.linear.Linear |
| input_size: !ref <dnn_neurons> |
| n_neurons: 1 |
|
|
| |
| activation: !name:torch.nn.LeakyReLU |
| dnn_layers: 2 |
| dnn_neurons: 384 |
| freeze_perceived_ssl: False |
| freeze_canonical_ssl: False |
| freeze_perceived_feature_extractor: True |
| freeze_canonical_feature_extractor: True |
|
|
| log_softmax: !new:speechbrain.nnet.activations.Softmax |
| apply_log: True |
|
|
| |
| |
|
|
| |
| |
| |
| |
| |
|
|
| ctc_cost: !name:utils.losses.ot_loss.batched_ottc_loss_bucketized |
|
|
|
|
| ctc_cost_mispro: !name:speechbrain.nnet.losses.ctc_loss |
| blank_index: !ref <blank_index> |
|
|
| |
| output_neurons: 71 |
| blank_index: 0 |
|
|
| model: !new:torch.nn.ModuleList |
| - [!ref <enc>, !ref <ctc_lin>, ] |
|
|
| adam_opt_class: !name:torch.optim.Adam |
| lr: !ref <lr> |
|
|
| pretrained_opt_class: !name:torch.optim.Adam |
| lr: !ref <lr_pretrained> |
|
|
| checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer |
| checkpoints_dir: !ref <save_folder> |
| recoverables: |
| model: !ref <model> |
| perceived_ssl: !ref <perceived_ssl> |
| counter: !ref <epoch_counter> |
| allow_partial_load: True |
| |
| |
| |
| |
|
|
| spec_augmentation: !new:speechbrain.augment.freq_domain.SpectrogramDrop |
| drop_length_low: 5 |
| drop_length_high: 27 |
| drop_count_low: 1 |
| drop_count_high: 3 |
| replace: 'zeros' |
|
|
| freq_chunk_augmentation: !new:speechbrain.augment.time_domain.DropFreq |
| drop_freq_low: 1e-14 |
| drop_freq_high: 1 |
| drop_freq_count_low: 1 |
| drop_freq_count_high: 3 |
| drop_freq_width: 0.10 |
| epsilon: 1e-12 |
|
|
# NOTE(review): file-scope value (in samples) consumed by DropChunk below; it is
# distinct from the drop_length_high: 27 (spectrogram frames) nested inside
# spec_augmentation — confirm the name collision is intentional.
drop_length_high: 3000
| time_chunk_augmentation: !new:speechbrain.augment.time_domain.DropChunk |
| drop_length_low: 1000 |
| drop_length_high: !ref <drop_length_high> |
| drop_count_low: 1 |
| drop_count_high: 3 |
|
|
| speed_augmentation: !new:speechbrain.augment.time_domain.SpeedPerturb |
| orig_freq: !ref <sample_rate> |
| speeds: [95, 100, 105] |
|
|
| timewarp_augmentation: !new:speechbrain.augment.freq_domain.Warping |
| warp_window: 5 |
| dim: 1 |
|
|
| augmentation: !new:speechbrain.augment.augmenter.Augmenter |
| augmentations: |
| - !ref <freq_chunk_augmentation> |
| - !ref <time_chunk_augmentation> |
| |
| |
| |
|
|
| epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter |
| limit: !ref <number_of_epochs> |
|
|
| train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger |
| save_file: !ref <train_log> |
|
|
| |
| |
| |
| |
| |
| |
|
|
| ctc_stats: !name:speechbrain.utils.metric_stats.MetricStats |
| metric: !name:speechbrain.nnet.losses.ctc_loss |
| blank_index: !ref <blank_index> |
| reduction: batch |
|
|
| per_stats: !name:speechbrain.utils.metric_stats.ErrorRateStats |
|
|
| |
| |
|
|
| seed: 3047 |
| __set_seed: !apply:torch.manual_seed [!ref <seed>] |
|
|
| |
| number_of_epochs: 300 |
| batch_size: 16 |
| lr: 0.0003 |
| sorting: ascending |
| sample_rate: 16000 |
| gradient_accumulation: 2 |
| lr_pretrained: 0.00001 |
|
|
| |
auto_mix_prec: True
| |
| precision: fp16 |
| eval_precision: fp32 |
|
|
| |
| train_dataloader_opts: |
| batch_size: !ref <batch_size> |
| |
|
|
| valid_dataloader_opts: |
| batch_size: !ref <batch_size> |
| |
|
|
| test_dataloader_opts: |
| batch_size: !ref <batch_size> |
|
|
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer |
| collect_in: !ref <save_folder>/ |
| loadables: |
| perceived_ssl: !ref <perceived_ssl> |
| model: !ref <model> |
| tokenizer: !ref <tokenizer> |
|
|
| encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential |
| perceived_ssl: !ref <perceived_ssl> |
| enc: !ref <enc> |
| ctc_lin: !ref <ctc_lin> |
| log_softmax: !ref <log_softmax> |
|
|
| decoding_function: !name:speechbrain.decoders.ctc_greedy_decode |
| blank_id: !ref <blank_index> |
|
|
tokenizer: !new:speechbrain.dataio.encoder.CTCTextEncoder
    # NOTE(review): hardcoded absolute path under a different user's home directory
    # than <lab_enc_file> (L5) — presumably both should resolve to the same label
    # encoder; confirm and consider load_from_file: !ref <lab_enc_file>.
    load_from_file: /home/kevingenghaopeng/MDD/IF-MDD/pretrained_models/iqra_extra_acou_model/ottc_k7_RoPE_TTS_FT/label_encoder.txt
| |
| modules: |
| encoder: !ref <encoder> |