Dongchao committed · Commit 0d492e4 (verified) · Parent(s): d1b63e9

Upload v0.2
infer_config.yaml CHANGED
@@ -32,15 +32,15 @@ prefix_lm: false
  num_codebooks: 1
  num_channels: 32
  unet_model_name: transformer-2d
- transformer_diffusion_config: Dongchao/tmp_rc/model_config.json
- sq_config: Dongchao/tmp_rc/sqcodec_config.yaml
- sq_resume: Dongchao/tmp_rc/sqcodec.pth
- whisper_path: Dongchao/tmp_rc/whisper-medium
- reason_lm_path: Dongchao/tmp_rc/audiothinker.pth
- llm_path: Dongchao/tmp_rc/Llama-3.2-3B
- reconstruction_path: Dongchao/tmp_rc/semantic.checkpoint
- prompt_path: Dongchao/tmp_rc/prompt.json
- best_rq_ckpt: Dongchao/tmp_rc/music_ssl.pt
+ transformer_diffusion_config: /turing_music_fs/music_data/ydc/exp2/reason_ckpt/model_config.json
+ sq_config: /turing_music_fs/music_data/ydc/exp2/reason_ckpt/sqcodec_config.yaml
+ sq_resume: /turing_music_fs/music_data/ydc/exp2/reason_ckpt/sqcodec.pth
+ whisper_path: /turing_music_fs/music_data/ydc/exp2/reason_ckpt/whisper-medium
+ reason_lm_path: /turing_music_fs/music_data/ydc/exp2/reason_ckpt/audiothinker.pth
+ llm_path: /turing_music_fs/music_data/ydc/exp2/reason_ckpt/Llama-3.2-3B
+ reconstruction_path: /turing_music_fs/music_data/ydc/exp2/reason_ckpt/semantic.checkpoint
+ prompt_path: /turing_music_fs/music_data/ydc/exp2/reason_ckpt/prompt.json
+ best_rq_ckpt: /turing_music_fs/music_data/ydc/exp2/reason_ckpt/music_ssl.pt
 
  print_freq: 100
  save_interval: 5000
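
Each of these keys names a checkpoint or config file that is resolved at inference time. A minimal sketch of loading and sanity-checking such a YAML with PyYAML (the helper name `load_infer_config` and the validation step are illustrative, not part of this repo; only the key names are taken from the diff above):

```python
# Illustrative loader for infer_config.yaml; field names come from the
# diff above, everything else is an assumption.
import os
import yaml  # pip install pyyaml

CKPT_KEYS = [
    "transformer_diffusion_config", "sq_config", "sq_resume",
    "whisper_path", "reason_lm_path", "llm_path",
    "reconstruction_path", "prompt_path", "best_rq_ckpt",
]

def load_infer_config(path="infer_config.yaml"):
    with open(path) as f:
        cfg = yaml.safe_load(f)
    # Fail fast if any referenced checkpoint is missing locally, since
    # the new values in this commit are absolute filesystem paths.
    missing = [k for k in CKPT_KEYS if not os.path.exists(str(cfg.get(k, "")))]
    if missing:
        raise FileNotFoundError(f"unresolved checkpoint paths: {missing}")
    return cfg
```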
wavlm/.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
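
These are the stock Hub `.gitattributes` rules: any file matching one of the patterns is stored as a Git LFS pointer rather than raw bytes. As a rough illustration (standard library only; the path-scoped `saved_model/**/*` rule is omitted for brevity), here is one way to list the files these patterns would route through LFS:

```python
# Illustrative only: mimic the name-based .gitattributes LFS routing.
from fnmatch import fnmatch
from pathlib import Path

LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.bin.*", "*.bz2", "*.ftz",
                "*.gz", "*.h5", "*.joblib", "*.lfs.*", "*.model",
                "*.msgpack", "*.onnx", "*.ot", "*.parquet", "*.pb",
                "*.pt", "*.pth", "*.rar", "*.tar.*", "*.tflite",
                "*.tgz", "*.xz", "*.zip", "*.zstandard", "*tfevents*"]

def routed_through_lfs(repo_root="."):
    """Yield files whose names match one of the LFS patterns above."""
    for p in Path(repo_root).rglob("*"):
        if p.is_file() and any(fnmatch(p.name, pat) for pat in LFS_PATTERNS):
            yield p
```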
wavlm/README.md ADDED
@@ -0,0 +1,65 @@
+ ---
+ language:
+ - en
+ datasets:
+ tags:
+ - speech
+ inference: false
+ ---
+
+ # WavLM-Base-Plus
+
+ [Microsoft's WavLM](https://github.com/microsoft/unilm/tree/master/wavlm)
+
+ The base model pretrained on 16kHz sampled speech audio. When using the model, make sure that your speech input is also sampled at 16kHz.
+
+ **Note**: This model does not have a tokenizer as it was pretrained on audio alone. In order to use this model for **speech recognition**, a tokenizer should be created and the model should be fine-tuned on labeled text data. Check out [this blog](https://huggingface.co/blog/fine-tune-wav2vec2-english) for a more detailed explanation of how to fine-tune the model.
+
+ The model was pre-trained on:
+
+ - 60,000 hours of [Libri-Light](https://arxiv.org/abs/1912.07875)
+ - 10,000 hours of [GigaSpeech](https://arxiv.org/abs/2106.06909)
+ - 24,000 hours of [VoxPopuli](https://arxiv.org/abs/2101.00390)
+
+ [Paper: WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900)
+
+ Authors: Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei
+
+ **Abstract**
+ *Self-supervised learning (SSL) achieves great success in speech recognition, while limited exploration has been attempted for other speech processing tasks. As speech signal contains multi-faceted information including speaker identity, paralinguistics, spoken content, etc., learning universal representations for all speech tasks is challenging. In this paper, we propose a new pre-trained model, WavLM, to solve full-stack downstream speech tasks. WavLM is built based on the HuBERT framework, with an emphasis on both spoken content modeling and speaker identity preservation. We first equip the Transformer structure with gated relative position bias to improve its capability on recognition tasks. For better speaker discrimination, we propose an utterance mixing training strategy, where additional overlapped utterances are created unsupervisely and incorporated during model training. Lastly, we scale up the training dataset from 60k hours to 94k hours. WavLM Large achieves state-of-the-art performance on the SUPERB benchmark, and brings significant improvements for various speech processing tasks on their representative benchmarks.*
+
+ The original model can be found under https://github.com/microsoft/unilm/tree/master/wavlm.
+
+ # Usage
+
+ This is an English pre-trained speech model that has to be fine-tuned on a downstream task like speech recognition or audio classification before it can be
+ used in inference. The model was pre-trained in English and should therefore perform well only in English. The model has been shown to work well on the [SUPERB benchmark](https://superbbenchmark.org/).
+
+ **Note**: The model was pre-trained on phonemes rather than characters. This means that one should make sure that the input text is converted to a sequence
+ of phonemes before fine-tuning.
+
+ ## Speech Recognition
+
+ To fine-tune the model for speech recognition, see [the official speech recognition example](https://github.com/huggingface/transformers/tree/master/examples/pytorch/speech-recognition).
+
+ ## Speech Classification
+
+ To fine-tune the model for speech classification, see [the official audio classification example](https://github.com/huggingface/transformers/tree/master/examples/pytorch/audio-classification).
+
+ ## Speaker Verification
+
+ TODO
+
+ ## Speaker Diarization
+
+ TODO
+
+ # Contribution
+
+ The model was contributed by [cywang](https://huggingface.co/cywang) and [patrickvonplaten](https://huggingface.co/patrickvonplaten).
+
+ # License
+
+ The official license can be found [here](https://github.com/microsoft/UniSpeech/blob/main/LICENSE).
+
+ ![design](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/wavlm.png)
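
Since the Usage section above stops short of a loading snippet, here is a minimal sketch of pulling hidden states from this checkpoint with `transformers`, assuming the `wavlm/` files in this commit sit in a local directory of that name (the silent dummy waveform is illustrative):

```python
# Sketch: extract WavLM hidden states from 16kHz audio with transformers.
import torch
from transformers import Wav2Vec2FeatureExtractor, WavLMModel

extractor = Wav2Vec2FeatureExtractor.from_pretrained("wavlm")  # local dir from this commit
model = WavLMModel.from_pretrained("wavlm")
model.eval()

waveform = torch.zeros(16000)  # 1 second of silence at 16kHz, illustrative input
inputs = extractor(waveform.numpy(), sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # shape (1, ~49 frames, 768)
print(hidden.shape)
```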
wavlm/config.json ADDED
@@ -0,0 +1,99 @@
+ {
+   "_name_or_path": "wavlm-base-plus",
+   "activation_dropout": 0.0,
+   "adapter_kernel_size": 3,
+   "adapter_stride": 2,
+   "add_adapter": false,
+   "apply_spec_augment": true,
+   "architectures": [
+     "WavLMModel"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "codevector_dim": 256,
+   "contrastive_logits_temperature": 0.1,
+   "conv_bias": false,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "diversity_loss_weight": 0.1,
+   "do_stable_layer_norm": false,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_norm": "group",
+   "feat_proj_dropout": 0.1,
+   "feat_quantizer_dropout": 0.0,
+   "final_dropout": 0.0,
+   "freeze_feat_extract_train": true,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.05,
+   "mask_channel_length": 10,
+   "mask_channel_min_space": 1,
+   "mask_channel_other": 0.0,
+   "mask_channel_prob": 0.0,
+   "mask_channel_selection": "static",
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_min_space": 1,
+   "mask_time_other": 0.0,
+   "mask_time_prob": 0.05,
+   "mask_time_selection": "static",
+   "model_type": "wavlm",
+   "no_mask_channel_overlap": false,
+   "no_mask_time_overlap": false,
+   "num_adapter_layers": 3,
+   "num_attention_heads": 12,
+   "num_buckets": 320,
+   "num_codevector_groups": 2,
+   "num_codevectors_per_group": 320,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_ctc_classes": 80,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 12,
+   "num_negatives": 100,
+   "output_hidden_size": 768,
+   "pad_token_id": 0,
+   "proj_codevector_dim": 256,
+   "replace_prob": 0.5,
+   "torch_dtype": "float32",
+   "transformers_version": "4.13.0.dev0",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32,
+   "tokenizer_class": "Wav2Vec2CTCTokenizer"
+ }
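
A detail worth reading off this config: the seven convolutional layers in `conv_stride` fix the encoder's frame rate. A quick worked check using the values above together with the 16 kHz sampling rate from `preprocessor_config.json` below:

```python
# Worked check of the feature-extractor frame rate from config.json values.
from math import prod

conv_stride = [5, 2, 2, 2, 2, 2, 2]  # from config.json
sampling_rate = 16000                # from preprocessor_config.json

total_stride = prod(conv_stride)     # 5 * 2**6 = 320 samples per frame
frames_per_second = sampling_rate / total_stride
print(total_stride, frames_per_second)  # 320, 50.0 -> one 768-dim frame every 20 ms
```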
wavlm/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": false,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
wavlm/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bb273a6ace99408b50cfc81afdbb7ef2de02da2eab0234e18db608ce692fe51
+ size 377617425
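
Note that these three lines are a Git LFS pointer, not the weights themselves; `git lfs pull` (or the Hub's download tooling) fetches the roughly 360 MB payload. An illustrative parser for this pointer format:

```python
# Illustrative parser for the git-lfs pointer format shown above.
def parse_lfs_pointer(text: str) -> dict:
    """Parse the 'key value' lines of a Git LFS pointer file."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"].startswith("https://git-lfs.github.com/spec/")
    return {"oid": fields["oid"], "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:3bb273a6ace99408b50cfc81afdbb7ef2de02da2eab0234e18db608ce692fe51
size 377617425"""
print(parse_lfs_pointer(pointer))  # ~360 MB of weights behind this pointer
```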