thailien6 commited on
Commit
3470301
·
verified ·
1 Parent(s): 45ee797

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +42 -0
  2. ACE_checkpoints_3.5B/.gitattributes +36 -0
  3. ACE_checkpoints_3.5B/MERT-v1-330M/.gitattributes +34 -0
  4. ACE_checkpoints_3.5B/MERT-v1-330M/MERT-v1-330M_fairseq.pt +3 -0
  5. ACE_checkpoints_3.5B/MERT-v1-330M/README.md +124 -0
  6. ACE_checkpoints_3.5B/MERT-v1-330M/config.json +92 -0
  7. ACE_checkpoints_3.5B/MERT-v1-330M/configuration_MERT.py +141 -0
  8. ACE_checkpoints_3.5B/MERT-v1-330M/modeling_MERT.py +409 -0
  9. ACE_checkpoints_3.5B/MERT-v1-330M/preprocessor_config.json +9 -0
  10. ACE_checkpoints_3.5B/MERT-v1-330M/pytorch_model.bin +3 -0
  11. ACE_checkpoints_3.5B/README.md +122 -0
  12. ACE_checkpoints_3.5B/config.json +35 -0
  13. ACE_checkpoints_3.5B/mHuBERT-147/.gitattributes +76 -0
  14. ACE_checkpoints_3.5B/mHuBERT-147/README.md +217 -0
  15. ACE_checkpoints_3.5B/mHuBERT-147/checkpoint_best.pt +3 -0
  16. ACE_checkpoints_3.5B/mHuBERT-147/config.json +76 -0
  17. ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_1.tsv +3 -0
  18. ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_10.tsv +3 -0
  19. ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_11.tsv +3 -0
  20. ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_2.tsv +3 -0
  21. ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_3.tsv +3 -0
  22. ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_4.tsv +3 -0
  23. ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_5.tsv +3 -0
  24. ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_6.tsv +3 -0
  25. ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_7.tsv +3 -0
  26. ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_8.tsv +3 -0
  27. ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_9.tsv +3 -0
  28. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS.tsv +3 -0
  29. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_1.tsv +3 -0
  30. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_10.tsv +3 -0
  31. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_11.tsv +3 -0
  32. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_12.tsv +3 -0
  33. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_13.tsv +3 -0
  34. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_14.tsv +3 -0
  35. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_15.tsv +3 -0
  36. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_16.tsv +3 -0
  37. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_17.tsv +3 -0
  38. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_18.tsv +3 -0
  39. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_19.tsv +3 -0
  40. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_2.tsv +3 -0
  41. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_20.tsv +3 -0
  42. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_3.tsv +3 -0
  43. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_4.tsv +3 -0
  44. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_5.tsv +3 -0
  45. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_6.tsv +3 -0
  46. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_7.tsv +3 -0
  47. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_8.tsv +3 -0
  48. ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_9.tsv +3 -0
  49. ACE_checkpoints_3.5B/mHuBERT-147/manifest/VL_1.tsv +0 -0
  50. ACE_checkpoints_3.5B/mHuBERT-147/manifest/VL_10.tsv +0 -0
.gitattributes CHANGED
@@ -57,3 +57,45 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_1.tsv filter=lfs diff=lfs merge=lfs -text
61
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_10.tsv filter=lfs diff=lfs merge=lfs -text
62
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_11.tsv filter=lfs diff=lfs merge=lfs -text
63
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_2.tsv filter=lfs diff=lfs merge=lfs -text
64
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_3.tsv filter=lfs diff=lfs merge=lfs -text
65
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_4.tsv filter=lfs diff=lfs merge=lfs -text
66
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_5.tsv filter=lfs diff=lfs merge=lfs -text
67
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_6.tsv filter=lfs diff=lfs merge=lfs -text
68
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_7.tsv filter=lfs diff=lfs merge=lfs -text
69
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_8.tsv filter=lfs diff=lfs merge=lfs -text
70
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_9.tsv filter=lfs diff=lfs merge=lfs -text
71
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS.tsv filter=lfs diff=lfs merge=lfs -text
72
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_1.tsv filter=lfs diff=lfs merge=lfs -text
73
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_10.tsv filter=lfs diff=lfs merge=lfs -text
74
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_11.tsv filter=lfs diff=lfs merge=lfs -text
75
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_12.tsv filter=lfs diff=lfs merge=lfs -text
76
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_13.tsv filter=lfs diff=lfs merge=lfs -text
77
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_14.tsv filter=lfs diff=lfs merge=lfs -text
78
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_15.tsv filter=lfs diff=lfs merge=lfs -text
79
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_16.tsv filter=lfs diff=lfs merge=lfs -text
80
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_17.tsv filter=lfs diff=lfs merge=lfs -text
81
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_18.tsv filter=lfs diff=lfs merge=lfs -text
82
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_19.tsv filter=lfs diff=lfs merge=lfs -text
83
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_2.tsv filter=lfs diff=lfs merge=lfs -text
84
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_20.tsv filter=lfs diff=lfs merge=lfs -text
85
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_3.tsv filter=lfs diff=lfs merge=lfs -text
86
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_4.tsv filter=lfs diff=lfs merge=lfs -text
87
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_5.tsv filter=lfs diff=lfs merge=lfs -text
88
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_6.tsv filter=lfs diff=lfs merge=lfs -text
89
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_7.tsv filter=lfs diff=lfs merge=lfs -text
90
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_8.tsv filter=lfs diff=lfs merge=lfs -text
91
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_9.tsv filter=lfs diff=lfs merge=lfs -text
92
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/VL_12.tsv filter=lfs diff=lfs merge=lfs -text
93
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/VL_13.tsv filter=lfs diff=lfs merge=lfs -text
94
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/VL_15.tsv filter=lfs diff=lfs merge=lfs -text
95
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/VL_16.tsv filter=lfs diff=lfs merge=lfs -text
96
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/VL_2.tsv filter=lfs diff=lfs merge=lfs -text
97
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/VL_8.tsv filter=lfs diff=lfs merge=lfs -text
98
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/VP_2.tsv filter=lfs diff=lfs merge=lfs -text
99
+ ACE_checkpoints_3.5B/mHuBERT-147/manifest/samromur.tsv filter=lfs diff=lfs merge=lfs -text
100
+ ACE_checkpoints_3.5B/mHuBERT-147/mhubert147_faiss.index filter=lfs diff=lfs merge=lfs -text
101
+ ACE_checkpoints_3.5B/umt5-base/tokenizer.json filter=lfs diff=lfs merge=lfs -text
ACE_checkpoints_3.5B/.gitattributes ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ umt5-base/tokenizer.json filter=lfs diff=lfs merge=lfs -text
ACE_checkpoints_3.5B/MERT-v1-330M/.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
ACE_checkpoints_3.5B/MERT-v1-330M/MERT-v1-330M_fairseq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13d9b88455f88f8608399aa0e921d23100298d867d04380402be171a01f50a89
3
+ size 3991038973
ACE_checkpoints_3.5B/MERT-v1-330M/README.md ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: cc-by-nc-4.0
3
+ inference: false
4
+ tags:
5
+ - music
6
+ pipeline_tag: audio-classification
7
+ ---
8
+
9
+ # Introduction to our series work
10
+
11
+ The development log of our Music Audio Pre-training (m-a-p) model family:
12
+ - 02/06/2023: [arxiv pre-print](https://arxiv.org/abs/2306.00107) and training [codes](https://github.com/yizhilll/MERT) released.
13
+ - 17/03/2023: we release two advanced music understanding models, [MERT-v1-95M](https://huggingface.co/m-a-p/MERT-v1-95M) and [MERT-v1-330M](https://huggingface.co/m-a-p/MERT-v1-330M) , trained with new paradigm and dataset. They outperform the previous models and can better generalize to more tasks.
14
+ - 14/03/2023: we retrained the MERT-v0 model with open-source-only music dataset [MERT-v0-public](https://huggingface.co/m-a-p/MERT-v0-public)
15
+ - 29/12/2022: a music understanding model [MERT-v0](https://huggingface.co/m-a-p/MERT-v0) trained with **MLM** paradigm, which performs better at downstream tasks.
16
+ - 29/10/2022: a pre-trained MIR model [music2vec](https://huggingface.co/m-a-p/music2vec-v1) trained with **BYOL** paradigm.
17
+
18
+
19
+
20
+ Here is a table for quick model pick-up:
21
+
22
+ | Name | Pre-train Paradigm | Training Data (hour) | Pre-train Context (second) | Model Size | Transformer Layer-Dimension | Feature Rate | Sample Rate | Release Date |
23
+ | ------------------------------------------------------------ | ------------------ | -------------------- | ---------------------------- | ---------- | --------------------------- | ------------ | ----------- | ------------ |
24
+ | [MERT-v1-330M](https://huggingface.co/m-a-p/MERT-v1-330M) | MLM | 160K | 5 | 330M | 24-1024 | 75 Hz | 24K Hz | 17/03/2023 |
25
+ | [MERT-v1-95M](https://huggingface.co/m-a-p/MERT-v1-95M) | MLM | 20K | 5 | 95M | 12-768 | 75 Hz | 24K Hz | 17/03/2023 |
26
+ | [MERT-v0-public](https://huggingface.co/m-a-p/MERT-v0-public) | MLM | 900 | 5 | 95M | 12-768 | 50 Hz | 16K Hz | 14/03/2023 |
27
+ | [MERT-v0](https://huggingface.co/m-a-p/MERT-v0) | MLM | 1000 | 5 | 95 M | 12-768 | 50 Hz | 16K Hz | 29/12/2022 |
28
+ | [music2vec-v1](https://huggingface.co/m-a-p/music2vec-v1) | BYOL | 1000 | 30 | 95 M | 12-768 | 50 Hz | 16K Hz | 30/10/2022 |
29
+
30
+ ## Explanation
31
+
32
+ The m-a-p models share the similar model architecture and the most distinguished difference is the paradigm in used pre-training. Other than that, there are several nuance technical configuration needs to know before using:
33
+
34
+ - **Model Size**: the number of parameters that would be loaded to memory. Please select the appropriate size fitting your hardware.
35
+ - **Transformer Layer-Dimension**: The number of transformer layers and the corresponding feature dimensions can be outputted from our model. This is marked out because features extracted by **different layers could have various performance depending on tasks**.
36
+ - **Feature Rate**: Given a 1-second audio input, the number of features output by the model.
37
+ - **Sample Rate**: The frequency of audio that the model is trained with.
38
+
39
+
40
+
41
+ # Introduction to MERT-v1
42
+
43
+ Compared to MERT-v0, we introduce multiple new things in the MERT-v1 pre-training:
44
+
45
+ - Change the pseudo labels to 8 codebooks from [encodec](https://github.com/facebookresearch/encodec), which potentially has higher quality and empower our model to support music generation.
46
+ - MLM prediction with in-batch noise mixture.
47
+ - Train with higher audio frequency (24K Hz).
48
+ - Train with more audio data (up to 160 thousands of hours).
49
+ - More available model sizes 95M and 330M.
50
+
51
+
52
+
53
+ More details will be written in our coming-soon paper.
54
+
55
+
56
+
57
+ # Model Usage
58
+
59
+ ```python
60
+ # from transformers import Wav2Vec2Processor
61
+ from transformers import Wav2Vec2FeatureExtractor
62
+ from transformers import AutoModel
63
+ import torch
64
+ from torch import nn
65
+ import torchaudio.transforms as T
66
+ from datasets import load_dataset
67
+
68
+ # loading our model weights
69
+ model = AutoModel.from_pretrained("m-a-p/MERT-v1-330M", trust_remote_code=True)
70
+ # loading the corresponding preprocessor config
71
+ processor = Wav2Vec2FeatureExtractor.from_pretrained("m-a-p/MERT-v1-330M",trust_remote_code=True)
72
+
73
+ # load demo audio and set processor
74
+ dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
75
+ dataset = dataset.sort("id")
76
+ sampling_rate = dataset.features["audio"].sampling_rate
77
+
78
+ resample_rate = processor.sampling_rate
79
+ # make sure the sample_rate aligned
80
+ if resample_rate != sampling_rate:
81
+ print(f'setting rate from {sampling_rate} to {resample_rate}')
82
+ resampler = T.Resample(sampling_rate, resample_rate)
83
+ else:
84
+ resampler = None
85
+
86
+ # audio file is decoded on the fly
87
+ if resampler is None:
88
+ input_audio = dataset[0]["audio"]["array"]
89
+ else:
90
+ input_audio = resampler(torch.from_numpy(dataset[0]["audio"]["array"]))
91
+
92
+ inputs = processor(input_audio, sampling_rate=resample_rate, return_tensors="pt")
93
+ with torch.no_grad():
94
+ outputs = model(**inputs, output_hidden_states=True)
95
+
96
+ # take a look at the output shape, there are 25 layers of representation
97
+ # each layer performs differently in different downstream tasks, you should choose empirically
98
+ all_layer_hidden_states = torch.stack(outputs.hidden_states).squeeze()
99
+ print(all_layer_hidden_states.shape) # [25 layer, Time steps, 1024 feature_dim]
100
+
101
+ # for utterance level classification tasks, you can simply reduce the representation in time
102
+ time_reduced_hidden_states = all_layer_hidden_states.mean(-2)
103
+ print(time_reduced_hidden_states.shape) # [25, 1024]
104
+
105
+ # you can even use a learnable weighted average representation
106
+ aggregator = nn.Conv1d(in_channels=25, out_channels=1, kernel_size=1)
107
+ weighted_avg_hidden_states = aggregator(time_reduced_hidden_states.unsqueeze(0)).squeeze()
108
+ print(weighted_avg_hidden_states.shape) # [1024]
109
+ ```
110
+
111
+
112
+
113
+ # Citation
114
+
115
+ ```shell
116
+ @misc{li2023mert,
117
+ title={MERT: Acoustic Music Understanding Model with Large-Scale Self-supervised Training},
118
+ author={Yizhi Li and Ruibin Yuan and Ge Zhang and Yinghao Ma and Xingran Chen and Hanzhi Yin and Chenghua Lin and Anton Ragni and Emmanouil Benetos and Norbert Gyenge and Roger Dannenberg and Ruibo Liu and Wenhu Chen and Gus Xia and Yemin Shi and Wenhao Huang and Yike Guo and Jie Fu},
119
+ year={2023},
120
+ eprint={2306.00107},
121
+ archivePrefix={arXiv},
122
+ primaryClass={cs.SD}
123
+ }
124
+ ```
ACE_checkpoints_3.5B/MERT-v1-330M/config.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "m-a-p/MERT-v1-330M",
3
+ "activation_dropout": 0.0,
4
+ "apply_spec_augment": true,
5
+ "architectures": [
6
+ "MERTModel"
7
+ ],
8
+ "attention_dropout": 0.0,
9
+ "attention_relax": 32.0,
10
+ "auto_map": {
11
+ "AutoConfig": "configuration_MERT.MERTConfig",
12
+ "AutoModel": "modeling_MERT.MERTModel"
13
+ },
14
+ "bos_token_id": 1,
15
+ "classifier_proj_size": 256,
16
+ "conv_bias": false,
17
+ "conv_dim": [
18
+ 512,
19
+ 512,
20
+ 512,
21
+ 512,
22
+ 512,
23
+ 512,
24
+ 512
25
+ ],
26
+ "conv_kernel": [
27
+ 10,
28
+ 3,
29
+ 3,
30
+ 3,
31
+ 3,
32
+ 2,
33
+ 2
34
+ ],
35
+ "conv_stride": [
36
+ 5,
37
+ 2,
38
+ 2,
39
+ 2,
40
+ 2,
41
+ 2,
42
+ 2
43
+ ],
44
+ "ctc_loss_reduction": "sum",
45
+ "ctc_zero_infinity": false,
46
+ "deepnorm": false,
47
+ "do_stable_layer_norm": true,
48
+ "eos_token_id": 2,
49
+ "feat_extract_activation": "gelu",
50
+ "feat_extract_dropout": 0.0,
51
+ "feat_extract_norm": "group",
52
+ "feat_proj_dropout": 0.0,
53
+ "feat_proj_layer_norm": true,
54
+ "feature_extractor_cqt": false,
55
+ "feature_extractor_cqt_bins": 336,
56
+ "final_dropout": 0.0,
57
+ "gradient_checkpointing": false,
58
+ "hidden_act": "gelu",
59
+ "hidden_dropout": 0.0,
60
+ "hidden_size": 1024,
61
+ "initializer_range": 0.02,
62
+ "intermediate_size": 4096,
63
+ "layer_norm_eps": 1e-05,
64
+ "layerdrop": 0.0,
65
+ "mask_channel_length": 10,
66
+ "mask_channel_min_space": 1,
67
+ "mask_channel_other": 0.0,
68
+ "mask_channel_prob": 0.0,
69
+ "mask_channel_selection": "static",
70
+ "mask_feature_length": 10,
71
+ "mask_feature_min_masks": 0,
72
+ "mask_feature_prob": 0.0,
73
+ "mask_time_length": 10,
74
+ "mask_time_min_masks": 2,
75
+ "mask_time_min_space": 1,
76
+ "mask_time_other": 0.0,
77
+ "mask_time_prob": 0.075,
78
+ "mask_time_selection": "static",
79
+ "model_type": "mert_model",
80
+ "num_attention_heads": 16,
81
+ "num_conv_pos_embedding_groups": 16,
82
+ "num_conv_pos_embeddings": 128,
83
+ "num_feat_extract_layers": 7,
84
+ "num_hidden_layers": 24,
85
+ "pad_token_id": 0,
86
+ "sample_rate": 24000,
87
+ "tokenizer_class": "Wav2Vec2CTCTokenizer",
88
+ "torch_dtype": "float32",
89
+ "transformers_version": "4.27.1",
90
+ "use_weighted_layer_sum": false,
91
+ "vocab_size": 32
92
+ }
ACE_checkpoints_3.5B/MERT-v1-330M/configuration_MERT.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ MERT model configuration
3
+ """
4
+
5
+ import functools
6
+ import operator
7
+
8
+ # from ...configuration_utils import PretrainedConfig
9
+ # from ...utils import logging
10
+ from transformers.configuration_utils import PretrainedConfig
11
+ from transformers.utils import logging
12
+
13
+ logger = logging.get_logger(__name__)
14
+
15
+ # TODO: use this MAP while uploading to Huggingface
16
+ # HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
17
+ # "facebook/hubert-base-ls960": "https://huggingface.co/facebook/hubert-base-ls960/resolve/main/config.json",
18
+ # # See all Hubert models at https://huggingface.co/models?filter=hubert
19
+ # }
20
+
21
+
22
+ class MERTConfig(PretrainedConfig):
23
+ r"""
24
+ """
25
+ model_type = "mert_model"
26
+
27
+ def __init__(
28
+ self,
29
+ vocab_size=32,
30
+ hidden_size=768,
31
+ num_hidden_layers=12,
32
+ num_attention_heads=12,
33
+ intermediate_size=3072,
34
+ hidden_act="gelu",
35
+ hidden_dropout=0.1,
36
+ activation_dropout=0.1,
37
+ attention_dropout=0.1,
38
+ feat_proj_layer_norm=True,
39
+ feat_proj_dropout=0.0,
40
+ final_dropout=0.1,
41
+ layerdrop=0.1,
42
+ initializer_range=0.02,
43
+ layer_norm_eps=1e-5,
44
+ feat_extract_norm="group",
45
+ feat_extract_activation="gelu",
46
+ conv_dim=(512, 512, 512, 512, 512, 512, 512),
47
+ conv_stride=(5, 2, 2, 2, 2, 2, 2),
48
+ conv_kernel=(10, 3, 3, 3, 3, 2, 2),
49
+ conv_bias=False,
50
+ num_conv_pos_embeddings=128,
51
+ num_conv_pos_embedding_groups=16,
52
+ do_stable_layer_norm=False,
53
+ apply_spec_augment=True,
54
+ mask_time_prob=0.05,
55
+ mask_time_length=10,
56
+ mask_time_min_masks=2,
57
+ mask_feature_prob=0.0,
58
+ mask_feature_length=10,
59
+ mask_feature_min_masks=0,
60
+ ctc_loss_reduction="sum",
61
+ ctc_zero_infinity=False,
62
+ use_weighted_layer_sum=False,
63
+ classifier_proj_size=256,
64
+ pad_token_id=0,
65
+ bos_token_id=1,
66
+ eos_token_id=2,
67
+ feature_extractor_cqt=False,
68
+ feature_extractor_cqt_bins=336,
69
+ deepnorm=False,
70
+ attention_relax=-1.0,
71
+ **kwargs
72
+ ):
73
+ super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
74
+ self.hidden_size = hidden_size
75
+ self.feat_extract_norm = feat_extract_norm
76
+ self.feat_extract_activation = feat_extract_activation
77
+ self.conv_dim = list(conv_dim)
78
+ self.conv_stride = list(conv_stride)
79
+ self.conv_kernel = list(conv_kernel)
80
+ self.conv_bias = conv_bias
81
+ self.num_conv_pos_embeddings = num_conv_pos_embeddings
82
+ self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
83
+ self.num_feat_extract_layers = len(self.conv_dim)
84
+ self.num_hidden_layers = num_hidden_layers
85
+ self.intermediate_size = intermediate_size
86
+ self.hidden_act = hidden_act
87
+ self.num_attention_heads = num_attention_heads
88
+ self.hidden_dropout = hidden_dropout
89
+ self.attention_dropout = attention_dropout
90
+ self.activation_dropout = activation_dropout
91
+ self.feat_proj_layer_norm = feat_proj_layer_norm
92
+ self.feat_proj_dropout = feat_proj_dropout
93
+ self.final_dropout = final_dropout
94
+ self.layerdrop = layerdrop
95
+ self.layer_norm_eps = layer_norm_eps
96
+ self.initializer_range = initializer_range
97
+ self.vocab_size = vocab_size
98
+ self.do_stable_layer_norm = do_stable_layer_norm
99
+ self.use_weighted_layer_sum = use_weighted_layer_sum
100
+ self.classifier_proj_size = classifier_proj_size
101
+
102
+ if (
103
+ (len(self.conv_stride) != self.num_feat_extract_layers)
104
+ or (len(self.conv_kernel) != self.num_feat_extract_layers)
105
+ or (len(self.conv_dim) != self.num_feat_extract_layers)
106
+ ):
107
+ raise ValueError(
108
+ "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
109
+ " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
110
+ f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
111
+ f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
112
+ )
113
+
114
+ # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
115
+ self.apply_spec_augment = apply_spec_augment
116
+ self.mask_time_prob = mask_time_prob
117
+ self.mask_time_length = mask_time_length
118
+ self.mask_time_min_masks = mask_time_min_masks
119
+ self.mask_feature_prob = mask_feature_prob
120
+ self.mask_feature_length = mask_feature_length
121
+ self.mask_feature_min_masks = mask_feature_min_masks
122
+
123
+ # ctc loss
124
+ self.ctc_loss_reduction = ctc_loss_reduction
125
+ self.ctc_zero_infinity = ctc_zero_infinity
126
+
127
+ # cqt feature extractor
128
+ self.feature_extractor_cqt = feature_extractor_cqt
129
+ self.feature_extractor_cqt_bins = feature_extractor_cqt_bins
130
+
131
+ # deepnorm: up-scale weighted residual conection + down-scale initial value transformer encoder
132
+ self.deepnorm = deepnorm
133
+
134
+ self.attention_relax = attention_relax
135
+
136
+ # fix bug with hf > 4.42
137
+ self.conv_pos_batch_norm = False
138
+
139
+ @property
140
+ def inputs_to_logits_ratio(self):
141
+ return functools.reduce(operator.mul, self.conv_stride, 1)
ACE_checkpoints_3.5B/MERT-v1-330M/modeling_MERT.py ADDED
@@ -0,0 +1,409 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ MERT model definition.
3
+ We largely adapt codes from:
4
+ 1. https://github.com/huggingface/transformers/blob/main/src/transformers/models/hubert/modeling_hubert.py
5
+ 2. https://github.com/facebookresearch/fairseq/blob/main/fairseq/models/wav2vec/wav2vec2.py
6
+ """
7
+
8
+ from typing import Optional, Tuple, Union
9
+ from transformers.modeling_outputs import BaseModelOutput
10
+ import torch
11
+ from torch import nn
12
+
13
+ from transformers.models.hubert.modeling_hubert import (
14
+ HubertFeatureEncoder,
15
+ HubertModel,
16
+ HubertEncoderStableLayerNorm,
17
+ HubertEncoder,
18
+ HubertEncoderLayer,
19
+ HubertPositionalConvEmbedding,
20
+ HubertAttention,
21
+ HubertFeedForward,
22
+ )
23
+
24
+ try:
25
+ from nnAudio import features as nnAudioFeatures
26
+ NNAUDIO_INSTALLED=True
27
+ except:
28
+ print("WARNING: feature_extractor_cqt requires the libray 'nnAudio'")
29
+ NNAUDIO_INSTALLED=False
30
+
31
+ from .configuration_MERT import MERTConfig
32
+
33
+ class MERTFeatureProjection(nn.Module):
34
+ def __init__(self, config):
35
+ super().__init__()
36
+ self.feat_proj_layer_norm = config.feat_proj_layer_norm
37
+ self.feature_extractor_cqt = config.feature_extractor_cqt
38
+
39
+ if self.feature_extractor_cqt:
40
+ # v3 concat features
41
+ self.feature_dimension = config.conv_dim[-1] + config.feature_extractor_cqt_bins
42
+ print(f"feature dimention: {self.feature_dimension}")
43
+ else:
44
+ self.feature_dimension = config.conv_dim[-1]
45
+ if self.feat_proj_layer_norm:
46
+ self.layer_norm = nn.LayerNorm(self.feature_dimension, eps=config.layer_norm_eps)
47
+ self.projection = nn.Linear(self.feature_dimension, config.hidden_size)
48
+ self.dropout = nn.Dropout(config.feat_proj_dropout)
49
+
50
+ def forward(self, hidden_states):
51
+ # non-projected hidden states are needed for quantization
52
+ if self.feat_proj_layer_norm:
53
+ hidden_states = self.layer_norm(hidden_states)
54
+ hidden_states = self.projection(hidden_states)
55
+ hidden_states = self.dropout(hidden_states)
56
+ return hidden_states
57
+
58
+ class MERTModel(HubertModel):
59
+ # overwrite config class
60
+ config_class = MERTConfig
61
+ base_model_prefix = "mert_model"
62
+ def __init__(
63
+ self,
64
+ config: MERTConfig,
65
+ ) -> None:
66
+ """
67
+ initialize the with the grandparent method HubertPreTrainedModel.__init__()
68
+ and modify the HuBERTModel.__init__()
69
+ """
70
+ super(HubertModel, self).__init__(config)
71
+
72
+ self.config = config
73
+
74
+ self.feature_extractor = HubertFeatureEncoder(config)
75
+ self.feature_projection = MERTFeatureProjection(config) # replace Feature Projection for introcuing new feature
76
+
77
+ if self.config.feature_extractor_cqt:
78
+ assert NNAUDIO_INSTALLED, "ERROR: feature_extractor_cqt requires the libray 'nnAudio', try after `pip install nnAudio` "
79
+ print('initializing cqt extractor for MERT')
80
+ self.feature_extractor_cqt = nnAudioFeatures.cqt.CQT(sr=self.config.sample_rate, hop_length=self.config.sample_rate//50, fmin=32.7,
81
+ fmax=None, n_bins=self.config.feature_extractor_cqt_bins, bins_per_octave=self.config.feature_extractor_cqt_bins//7,
82
+ filter_scale=1, norm=1, window='hann', center=True,
83
+ pad_mode='constant', trainable=False,
84
+ output_format='Magnitude', verbose=True)
85
+
86
+ if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
87
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
88
+
89
+
90
+ if config.do_stable_layer_norm:
91
+ assert not config.deepnorm, "must use post-layer_norm with deepnorm"
92
+ self.encoder = HubertEncoderStableLayerNorm(config)
93
+ else:
94
+ if config.deepnorm:
95
+ self.encoder = HubertEncoder_extend(config)
96
+ else:
97
+ self.encoder = HubertEncoder(config)
98
+
99
+ # Initialize weights and apply final processing
100
+ self.post_init()
101
+
102
+ def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> Union[Tuple, BaseModelOutput]:
103
+
104
+ # return super().forward(input_values, attention_mask, mask_time_indices, output_attentions, output_hidden_states, return_dict)
105
+
106
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
107
+ output_hidden_states = (
108
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
109
+ )
110
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
111
+
112
+ extract_features = self.feature_extractor(input_values)
113
+ extract_features = extract_features.transpose(1, 2)
114
+
115
+ # add additional cqt features for transformer input
116
+ if self.config.feature_extractor_cqt:
117
+ features_cqt = self.feature_extractor_cqt(input_values).transpose(1, 2)
118
+ features_cqt = features_cqt[:,:extract_features.shape[1],:] # align shape
119
+ # # v2
120
+ # features_cqt = self.post_cqt_feature_proj(features_cqt)
121
+ # extract_features = self.feature_projection.layer_norm(extract_features) + self.feature_projection.layer_norm(features_cqt) #v2
122
+ # v3
123
+ extract_features = torch.cat([extract_features,features_cqt], 2)
124
+
125
+ if attention_mask is not None:
126
+ # compute reduced attention_mask corresponding to feature vectors
127
+ attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)
128
+
129
+ hidden_states = self.feature_projection(extract_features)
130
+ hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
131
+
132
+ encoder_outputs = self.encoder(
133
+ hidden_states,
134
+ attention_mask=attention_mask,
135
+ output_attentions=output_attentions,
136
+ output_hidden_states=output_hidden_states,
137
+ return_dict=return_dict,
138
+ )
139
+
140
+ hidden_states = encoder_outputs[0] # take last_hidden from encoder output
141
+
142
+ if not return_dict:
143
+ return (hidden_states,) + encoder_outputs[1:]
144
+
145
+ return BaseModelOutput(
146
+ last_hidden_state=hidden_states,
147
+ hidden_states=encoder_outputs.hidden_states,
148
+ attentions=encoder_outputs.attentions,
149
+ )
150
+
151
+
152
class HubertEncoder_extend(HubertEncoder):
    """HubertEncoder variant whose stack is built from
    ``HubertEncoderLayerExtend`` layers, with optional DeepNorm-style
    down-scaling of selected projection weights at construction time."""

    # Parameter-name fragments whose weights are divided by the DeepNorm
    # init scale: both feed-forward dense layers plus the attention value
    # and output projections.
    _DEEPNORM_SCALED_KEYS = (
        "feed_forward.intermediate_dense",
        "feed_forward.output_dense",
        "out_proj",
        "v_proj",
    )

    def __init__(self, config):
        # Intentionally bypass HubertEncoder.__init__ so the stock layer
        # stack is never allocated; only nn.Module bookkeeping runs here.
        nn.Module.__init__(self)

        self.config = config
        self.pos_conv_embed = HubertPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList(
            [HubertEncoderLayerExtend(config) for _ in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False

        if config.deepnorm:
            # DeepNorm init: divide the selected projections by (8N)^(1/4),
            # where N is the number of encoder layers.
            init_scale = (8.0 * config.num_hidden_layers) ** 0.25
            for name, p in self.named_parameters():
                if any(key in name for key in self._DEEPNORM_SCALED_KEYS):
                    p.data.div_(init_scale)
180
+
181
class HubertEncoderLayerExtend(HubertEncoderLayer):
    """Post-LN Hubert encoder layer that picks a relaxed-attention module
    when ``config.attention_relax > 0`` and supports DeepNorm residual
    scaling via ``residual_alpha``."""

    def __init__(self, config):
        # Intentionally bypass HubertEncoderLayer.__init__: the attention
        # module is selected here based on ``config.attention_relax``.
        nn.Module.__init__(self)

        if config.attention_relax > 0:
            self.attention = HubertAttention_extend(
                embed_dim=config.hidden_size,
                num_heads=config.num_attention_heads,
                dropout=config.attention_dropout,
                is_decoder=False,
                attention_relax=config.attention_relax,
            )
        else:
            self.attention = HubertAttention(
                embed_dim=config.hidden_size,
                num_heads=config.num_attention_heads,
                dropout=config.attention_dropout,
                is_decoder=False,
            )

        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = HubertFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # DeepNorm residual weight alpha = (2N)^(1/4); plain residual otherwise.
        self.residual_alpha = (2.0 * config.num_hidden_layers) ** 0.25 if config.deepnorm else 1.0

    def residual_connection(self, x, residual):
        """Combine a sub-block output with its input.

        Args:
            residual: the tensor that entered the sub-block f().
            x: the sub-block output f(residual).

        Returns:
            ``residual * residual_alpha + x``.
        """
        return residual * self.residual_alpha + x

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        # --- self-attention sub-block (post-layer-norm ordering) ---
        shortcut = hidden_states
        hidden_states, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.residual_connection(hidden_states, shortcut)
        hidden_states = self.layer_norm(hidden_states)

        # --- feed-forward sub-block (post-layer-norm ordering) ---
        shortcut = hidden_states
        hidden_states = self.residual_connection(self.feed_forward(hidden_states), shortcut)
        hidden_states = self.final_layer_norm(hidden_states)

        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
243
+
244
+
245
+ class HubertAttention_extend(nn.Module):
246
+ def __init__(
247
+ self,
248
+ embed_dim: int,
249
+ num_heads: int,
250
+ dropout: float = 0.0,
251
+ is_decoder: bool = False,
252
+ bias: bool = True,
253
+ attention_relax: float = -1.0,
254
+ ):
255
+ super().__init__()
256
+ # nn.Module.__init__(self)
257
+ self.embed_dim = embed_dim
258
+ self.num_heads = num_heads
259
+ self.dropout = dropout
260
+ self.head_dim = embed_dim // num_heads
261
+
262
+ if (self.head_dim * num_heads) != self.embed_dim:
263
+ raise ValueError(
264
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
265
+ f" and `num_heads`: {num_heads})."
266
+ )
267
+ self.scaling = self.head_dim**-0.5
268
+ self.is_decoder = is_decoder
269
+
270
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
271
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
272
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
273
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
274
+
275
+ if attention_relax > 0:
276
+ self.attention_relax = attention_relax
277
+
278
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
279
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
280
+
281
+ def forward(
282
+ self,
283
+ hidden_states: torch.Tensor,
284
+ key_value_states: Optional[torch.Tensor] = None,
285
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
286
+ attention_mask: Optional[torch.Tensor] = None,
287
+ layer_head_mask: Optional[torch.Tensor] = None,
288
+ output_attentions: bool = False,
289
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
290
+ """Input shape: Batch x Time x Channel"""
291
+
292
+ # if key_value_states are provided this layer is used as a cross-attention layer
293
+ # for the decoder
294
+ is_cross_attention = key_value_states is not None
295
+
296
+ bsz, tgt_len, _ = hidden_states.size()
297
+
298
+ # get query proj
299
+ query_states = self.q_proj(hidden_states) * self.scaling
300
+ # get key, value proj
301
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
302
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
303
+ # the provided `key_value_states` to support prefix tuning
304
+ if (
305
+ is_cross_attention
306
+ and past_key_value is not None
307
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
308
+ ):
309
+ # reuse k,v, cross_attentions
310
+ key_states = past_key_value[0]
311
+ value_states = past_key_value[1]
312
+ elif is_cross_attention:
313
+ # cross_attentions
314
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
315
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
316
+ elif past_key_value is not None:
317
+ # reuse k, v, self_attention
318
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
319
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
320
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
321
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
322
+ else:
323
+ # self_attention
324
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
325
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
326
+
327
+ if self.is_decoder:
328
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
329
+ # Further calls to cross_attention layer can then reuse all cross-attention
330
+ # key/value_states (first "if" case)
331
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
332
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
333
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
334
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
335
+ past_key_value = (key_states, value_states)
336
+
337
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
338
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
339
+ key_states = key_states.view(*proj_shape)
340
+ value_states = value_states.view(*proj_shape)
341
+
342
+ src_len = key_states.size(1)
343
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
344
+
345
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
346
+ raise ValueError(
347
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
348
+ f" {attn_weights.size()}"
349
+ )
350
+
351
+ if attention_mask is not None:
352
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
353
+ raise ValueError(
354
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
355
+ )
356
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
357
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
358
+
359
+ if self.attention_relax > 0:
360
+ # => (bsz, self.num_heads, tgt_len, src_len)
361
+ # attn_weights_relax = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)/self.attention_relax
362
+ # => (bsz*self.num_heads, tgt_len, src_len)
363
+ attn_weights_relax = attn_weights / self.attention_relax
364
+
365
+ # => (bsz* self.num_heads, tgt_len, 1)
366
+ attn_max_relax = torch.max(attn_weights_relax, dim=-1, keepdim=False).unsqueeze(2)
367
+ attn_weights = (attn_weights_relax - attn_max_relax) * self.attention_relax
368
+
369
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
370
+
371
+ if layer_head_mask is not None:
372
+ if layer_head_mask.size() != (self.num_heads,):
373
+ raise ValueError(
374
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
375
+ f" {layer_head_mask.size()}"
376
+ )
377
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
378
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
379
+
380
+ if output_attentions:
381
+ # this operation is a bit awkward, but it's required to
382
+ # make sure that attn_weights keeps its gradient.
383
+ # In order to do so, attn_weights have to be reshaped
384
+ # twice and have to be reused in the following
385
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
386
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
387
+ else:
388
+ attn_weights_reshaped = None
389
+
390
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
391
+
392
+ attn_output = torch.bmm(attn_probs, value_states)
393
+
394
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
395
+ raise ValueError(
396
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
397
+ f" {attn_output.size()}"
398
+ )
399
+
400
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
401
+ attn_output = attn_output.transpose(1, 2)
402
+
403
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
404
+ # partitioned aross GPUs when using tensor-parallelism.
405
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
406
+
407
+ attn_output = self.out_proj(attn_output)
408
+
409
+ return attn_output, attn_weights_reshaped, past_key_value
ACE_checkpoints_3.5B/MERT-v1-330M/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_normalize": false,
3
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
4
+ "feature_size": 1,
5
+ "padding_side": "right",
6
+ "padding_value": 0,
7
+ "return_attention_mask": true,
8
+ "sampling_rate": 24000
9
+ }
ACE_checkpoints_3.5B/MERT-v1-330M/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c03774d821a6ad972cb48220b128a10ebea2c790116bd0fd35e5a013f8017f6
3
+ size 1261846489
ACE_checkpoints_3.5B/README.md ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ tags:
4
+ - music
5
+ - text2music
6
+ - acestep
7
+ pipeline_tag: text-to-audio
8
+ language:
9
+ - en
10
+ - zh
11
+ - de
12
+ - fr
13
+ - es
14
+ - it
15
+ - pt
16
+ - pl
17
+ - tr
18
+ - ru
19
+ - cs
20
+ - nl
21
+ - ar
22
+ - ja
23
+ - hu
24
+ - ko
25
+ - hi
26
+ ---
27
+
28
+ # ACE-Step: A Step Towards Music Generation Foundation Model
29
+
30
+ ![ACE-Step Framework](https://github.com/ACE-Step/ACE-Step/raw/main/assets/ACE-Step_framework.png)
31
+
32
+ ## Model Description
33
+
34
+ ACE-Step is a novel open-source foundation model for music generation that overcomes key limitations of existing approaches through a holistic architectural design. It integrates diffusion-based generation with Sana's Deep Compression AutoEncoder (DCAE) and a lightweight linear transformer, achieving state-of-the-art performance in generation speed, musical coherence, and controllability.
35
+
36
+ **Key Features:**
37
+ - 15× faster than LLM-based baselines (20s for 4-minute music on A100)
38
+ - Superior musical coherence across melody, harmony, and rhythm
39
+ - Full-song generation with duration control, accepting natural language descriptions
40
+
41
+ ## Uses
42
+
43
+ ### Direct Use
44
+ ACE-Step can be used for:
45
+ - Generating original music from text descriptions
46
+ - Music remixing and style transfer
47
+ - Editing song lyrics
48
+
49
+ ### Downstream Use
50
+ The model serves as a foundation for:
51
+ - Voice cloning applications
52
+ - Specialized music generation (rap, jazz, etc.)
53
+ - Music production tools
54
+ - Creative AI assistants
55
+
56
+ ### Out-of-Scope Use
57
+ The model should not be used for:
58
+ - Generating copyrighted content without permission
59
+ - Creating harmful or offensive content
60
+ - Misrepresenting AI-generated music as human-created
61
+
62
+ ## How to Get Started
63
+
64
+ see: https://github.com/ace-step/ACE-Step
65
+
66
+ ## Hardware Performance
67
+
68
+ | Device | 27 Steps | 60 Steps |
69
+ |---------------|----------|----------|
70
+ | NVIDIA A100 | 27.27x | 12.27x |
71
+ | RTX 4090 | 34.48x | 15.63x |
72
+ | RTX 3090 | 12.76x | 6.48x |
73
+ | M2 Max | 2.27x | 1.03x |
74
+
75
+ *RTF (Real-Time Factor) shown - higher values indicate faster generation*
76
+
77
+
78
+ ## Limitations
79
+
80
+ - Performance varies by language (top 10 languages perform best)
81
+ - Longer generations (>5 minutes) may lose structural coherence
82
+ - Rare instruments may not render perfectly
83
+ - Output Inconsistency: Highly sensitive to random seeds and input duration, leading to varied "gacha-style" results.
84
+ - Style-specific Weaknesses: Underperforms on certain genres (e.g. Chinese rap/zh_rap) Limited style adherence and musicality ceiling
85
+ - Continuity Artifacts: Unnatural transitions in repainting/extend operations
86
+ - Vocal Quality: Coarse vocal synthesis lacking nuance
87
+ - Control Granularity: Needs finer-grained musical parameter control
88
+
89
+ ## Ethical Considerations
90
+
91
+ Users should:
92
+ - Verify originality of generated works
93
+ - Disclose AI involvement
94
+ - Respect cultural elements and copyrights
95
+ - Avoid harmful content generation
96
+
97
+
98
+ ## Model Details
99
+
100
+ **Developed by:** ACE Studio and StepFun
101
+ **Model type:** Diffusion-based music generation with transformer conditioning
102
+ **License:** Apache 2.0
103
+ **Resources:**
104
+ - [Project Page](https://ace-step.github.io/)
105
+ - [Demo Space](https://huggingface.co/spaces/ACE-Step/ACE-Step)
106
+ - [GitHub Repository](https://github.com/ACE-Step/ACE-Step)
107
+
108
+
109
+ ## Citation
110
+
111
+ ```bibtex
112
+ @misc{gong2025acestep,
113
+ title={ACE-Step: A Step Towards Music Generation Foundation Model},
114
+ author={Junmin Gong, Wenxiao Zhao, Sen Wang, Shengyuan Xu, Jing Guo},
115
+ howpublished={\url{https://github.com/ace-step/ACE-Step}},
116
+ year={2025},
117
+ note={GitHub repository}
118
+ }
119
+ ```
120
+
121
+ ## Acknowledgements
122
+ This project is co-led by ACE Studio and StepFun.
ACE_checkpoints_3.5B/config.json ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "ACEStepTransformer2DModel",
3
+ "_diffusers_version": "0.32.2",
4
+ "attention_head_dim": 128,
5
+ "in_channels": 8,
6
+ "inner_dim": 2560,
7
+ "lyric_encoder_vocab_size": 6693,
8
+ "lyric_hidden_size": 1024,
9
+ "max_height": 16,
10
+ "max_position": 32768,
11
+ "max_width": 32768,
12
+ "mlp_ratio": 2.5,
13
+ "num_attention_heads": 20,
14
+ "num_layers": 24,
15
+ "out_channels": 8,
16
+ "patch_size": [
17
+ 16,
18
+ 1
19
+ ],
20
+ "rope_theta": 1000000.0,
21
+ "speaker_embedding_dim": 512,
22
+ "ssl_encoder_depths": [
23
+ 8,
24
+ 8
25
+ ],
26
+ "ssl_latent_dims": [
27
+ 1024,
28
+ 768
29
+ ],
30
+ "ssl_names": [
31
+ "mert",
32
+ "m-hubert"
33
+ ],
34
+ "text_embedding_dim": 768
35
+ }
ACE_checkpoints_3.5B/mHuBERT-147/.gitattributes ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ mhubert147_faiss.index filter=lfs diff=lfs merge=lfs -text
37
+ manifest/CV_1.tsv filter=lfs diff=lfs merge=lfs -text
38
+ manifest/CV_10.tsv filter=lfs diff=lfs merge=lfs -text
39
+ manifest/CV_2.tsv filter=lfs diff=lfs merge=lfs -text
40
+ manifest/CV_3.tsv filter=lfs diff=lfs merge=lfs -text
41
+ manifest/CV_4.tsv filter=lfs diff=lfs merge=lfs -text
42
+ manifest/CV_5.tsv filter=lfs diff=lfs merge=lfs -text
43
+ manifest/CV_6.tsv filter=lfs diff=lfs merge=lfs -text
44
+ manifest/CV_7.tsv filter=lfs diff=lfs merge=lfs -text
45
+ manifest/CV_8.tsv filter=lfs diff=lfs merge=lfs -text
46
+ manifest/CV_9.tsv filter=lfs diff=lfs merge=lfs -text
47
+ manifest/CV_11.tsv filter=lfs diff=lfs merge=lfs -text
48
+ manifest/MLS_1.tsv filter=lfs diff=lfs merge=lfs -text
49
+ manifest/MLS_2.tsv filter=lfs diff=lfs merge=lfs -text
50
+ manifest/MLS_3.tsv filter=lfs diff=lfs merge=lfs -text
51
+ manifest/MLS_4.tsv filter=lfs diff=lfs merge=lfs -text
52
+ manifest/MLS_5.tsv filter=lfs diff=lfs merge=lfs -text
53
+ manifest/MLS.tsv filter=lfs diff=lfs merge=lfs -text
54
+ manifest/MLS_10.tsv filter=lfs diff=lfs merge=lfs -text
55
+ manifest/MLS_11.tsv filter=lfs diff=lfs merge=lfs -text
56
+ manifest/MLS_12.tsv filter=lfs diff=lfs merge=lfs -text
57
+ manifest/MLS_13.tsv filter=lfs diff=lfs merge=lfs -text
58
+ manifest/MLS_14.tsv filter=lfs diff=lfs merge=lfs -text
59
+ manifest/MLS_15.tsv filter=lfs diff=lfs merge=lfs -text
60
+ manifest/MLS_16.tsv filter=lfs diff=lfs merge=lfs -text
61
+ manifest/MLS_17.tsv filter=lfs diff=lfs merge=lfs -text
62
+ manifest/MLS_18.tsv filter=lfs diff=lfs merge=lfs -text
63
+ manifest/MLS_19.tsv filter=lfs diff=lfs merge=lfs -text
64
+ manifest/MLS_20.tsv filter=lfs diff=lfs merge=lfs -text
65
+ manifest/MLS_6.tsv filter=lfs diff=lfs merge=lfs -text
66
+ manifest/MLS_7.tsv filter=lfs diff=lfs merge=lfs -text
67
+ manifest/MLS_8.tsv filter=lfs diff=lfs merge=lfs -text
68
+ manifest/MLS_9.tsv filter=lfs diff=lfs merge=lfs -text
69
+ manifest/samromur.tsv filter=lfs diff=lfs merge=lfs -text
70
+ manifest/VL_12.tsv filter=lfs diff=lfs merge=lfs -text
71
+ manifest/VL_2.tsv filter=lfs diff=lfs merge=lfs -text
72
+ manifest/VL_8.tsv filter=lfs diff=lfs merge=lfs -text
73
+ manifest/VL_13.tsv filter=lfs diff=lfs merge=lfs -text
74
+ manifest/VL_15.tsv filter=lfs diff=lfs merge=lfs -text
75
+ manifest/VL_16.tsv filter=lfs diff=lfs merge=lfs -text
76
+ manifest/VP_2.tsv filter=lfs diff=lfs merge=lfs -text
ACE_checkpoints_3.5B/mHuBERT-147/README.md ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: cc-by-nc-sa-4.0
3
+ language:
4
+ - ab
5
+ - af
6
+ - am
7
+ - ar
8
+ - as
9
+ - az
10
+ - ba
11
+ - be
12
+ - bn
13
+ - bo
14
+ - bs
15
+ - br
16
+ - bg
17
+ - ca
18
+ - cs
19
+ - cv
20
+ - cy
21
+ - da
22
+ - de
23
+ - dv
24
+ - el
25
+ - en
26
+ - eo
27
+ - et
28
+ - eu
29
+ - ee
30
+ - fo
31
+ - fa
32
+ - tl
33
+ - fi
34
+ - fr
35
+ - fy
36
+ - ga
37
+ - gl
38
+ - gv
39
+ - gn
40
+ - gu
41
+ - ht
42
+ - ha
43
+ - he
44
+ - hi
45
+ - hr
46
+ - hu
47
+ - hy
48
+ - ig
49
+ - ia
50
+ - id
51
+ - is
52
+ - it
53
+ - jv
54
+ - ja
55
+ - kn
56
+ - ka
57
+ - kk
58
+ - km
59
+ - rw
60
+ - ky
61
+ - ku
62
+ - ko
63
+ - lo
64
+ - la
65
+ - lv
66
+ - ln
67
+ - lt
68
+ - lb
69
+ - lg
70
+ - ml
71
+ - mr
72
+ - mk
73
+ - mg
74
+ - mt
75
+ - mn
76
+ - mi
77
+ - ms
78
+ - my
79
+ - ne
80
+ - nl
81
+ - nn
82
+ - no
83
+ - oc
84
+ - or
85
+ - pa
86
+ - pl
87
+ - pt
88
+ - ps
89
+ - ro
90
+ - ru
91
+ - sa
92
+ - si
93
+ - sl
94
+ - sk
95
+ - sn
96
+ - sd
97
+ - so
98
+ - st
99
+ - es
100
+ - sq
101
+ - sc
102
+ - sr
103
+ - su
104
+ - sw
105
+ - sv
106
+ - ta
107
+ - tt
108
+ - te
109
+ - tg
110
+ - th
111
+ - tn
112
+ - tk
113
+ - tr
114
+ - tw
115
+ - ug
116
+ - uk
117
+ - ur
118
+ - uz
119
+ - vi
120
+ - xh
121
+ - yi
122
+ - yo
123
+ - zh
124
+ ---
125
+ **This repository contains the best mHuBERT-147 pre-trained model.**
126
+
127
+ **MODEL DETAILS:** 3rd iteration, K=1000, HuBERT base architecture (95M parameters), 147 languages.
128
+
129
+ # mHuBERT-147 models
130
+
131
+ mHuBERT-147 are compact and competitive multilingual HuBERT models trained on 90K hours of open-license data in 147 languages.
132
+ Different from *traditional* HuBERTs, mHuBERT-147 models are trained using faiss IVF discrete speech units.
133
+ Training employs two-level up-sampling, over both languages and data sources. See more information in [our paper](https://arxiv.org/pdf/2406.06371).
134
+
135
+
136
+ # Table of Contents:
137
+
138
+ 1. [Summary](https://huggingface.co/utter-project/mHuBERT-147#mhubert-147-models)
139
+ 2. [Training Data and Code](https://huggingface.co/utter-project/mHuBERT-147#training)
140
+ 3. [ML-SUPERB Scores](https://huggingface.co/utter-project/mHuBERT-147#ml-superb-scores)
141
+ 4. [Languages and Datasets](https://huggingface.co/utter-project/mHuBERT-147#languages-and-datasets)
142
+ 5. [Intermediate Checkpoints](https://huggingface.co/utter-project/mHuBERT-147#intermediate-checkpoints)
143
+ 6. [Citing and Funding Information](https://huggingface.co/utter-project/mHuBERT-147#citing-and-funding-information)
144
+
145
+
146
+ **This repository contains:**
147
+ * Fairseq checkpoint (original);
148
+ * HuggingFace checkpoint (conversion using transformers library);
149
+ * Faiss index for continuous pre-training (OPQ16_64,IVF1000_HNSW32,PQ16x4fsr).
150
+
151
+ **Related Models:**
152
+ * [2nd Iteration mHuBERT-147](https://huggingface.co/utter-project/mHuBERT-147-base-2nd-iter)
153
+ * [1st Iteration mHuBERT-147](https://huggingface.co/utter-project/mHuBERT-147-base-1st-iter)
154
+ * [CommonVoice Prototype (12 languages)](https://huggingface.co/utter-project/hutter-12-3rd-base)
155
+
156
+ # Training
157
+
158
+ * **[Manifest list available here.](https://huggingface.co/utter-project/mHuBERT-147-base-3rd-iter/tree/main/manifest)** Please note that since training, there were CommonVoice removal requests. This means that some of the listed files are no longer available.
159
+
160
+ * **[Fairseq fork](https://github.com/utter-project/fairseq)** contains the scripts for training with multilingual batching with two-level up-sampling.
161
+
162
+ * **[Scripts for pre-processing/faiss clustering available here.](https://github.com/utter-project/mHuBERT-147-scripts)**
163
+
164
+ # ML-SUPERB Scores
165
+
166
+ mHuBERT-147 reaches second and first position in the 10min and 1h leaderboards respectively. We achieve new SOTA scores for three LID tasks.
167
+ See more information in [our paper](https://arxiv.org/pdf/2406.06371).
168
+
169
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/62262e19d36494a6f743a28d/chXjExnWc3rhhtdsyiU-W.png)
170
+
171
+ # Languages and Datasets
172
+
173
+ **Datasets:** For ASR/ST/TTS datasets, only train set is used.
174
+ * [Aishell](https://www.openslr.org/33/) and [AISHELL-3](https://www.openslr.org/93/)
175
+ * [BibleTTS](https://www.openslr.org/129/)
176
+ * [ClovaCall](https://github.com/clovaai/ClovaCall)
177
+ * [CommonVoice v11](https://commonvoice.mozilla.org/en/datasets)
178
+ * Google TTS data: [Javanese](https://www.openslr.org/41/), [Khmer](https://www.openslr.org/42/), [Nepali](https://www.openslr.org/43/), [Sundanese](https://www.openslr.org/44/), [South African Languages](https://www.openslr.org/32/), [Bengali Languages](https://www.openslr.org/37/)
179
+ * IISc-MILE: [Tamil](https://www.openslr.org/127/), [Kannada](https://www.openslr.org/126/)
180
+ * [Japanese Versatile Speech](https://sites.google.com/site/shinnosuketakamichi/research-topics/jvs_corpus)
181
+ * [Kokoro](https://github.com/kaiidams/Kokoro-Speech-Dataset)
182
+ * [Kosp2e](https://github.com/warnikchow/kosp2e)
183
+ * Media Speech: [Turkish Only](https://www.openslr.org/108/)
184
+ * [Multilingual LibriSpeech](https://www.openslr.org/94/)
185
+ * [Samrómur](https://www.openslr.org/128/)
186
+ * [THCHS-30](https://www.openslr.org/18/) and [THUYG-20](https://www.openslr.org/22/)
187
+ * [VoxLingua107](https://bark.phon.ioc.ee/voxlingua107/)
188
+ * [VoxPopuli](https://github.com/facebookresearch/voxpopuli/)
189
+
190
+ **Languages present not indexed by Huggingface:** Asturian (ast), Basaa (bas), Cebuano (ceb), Central Kurdish/Sorani (ckb), Hakha Chin (cnh), Hawaiian (haw), Upper Sorbian (hsb) Kabyle (kab), Moksha (mdf), Meadow Mari (mhr), Hill Mari (mrj), Erzya (myv), Taiwanese Hokkien (nan-tw), Sursilvan (rm-sursilv), Vallader (rm-vallader), Sakha (sah), Santali (sat), Scots (sco), Saraiki (skr), Tigre (tig), Tok Pisin (tpi), Akwapen Twi (tw-akuapem), Asante Twi (tw-asante), Votic (vot), Waray (war), Cantonese (yue).
191
+
192
+ # Intermediate Checkpoints
193
+
194
+ For allowing research in training dynamics, the intermediate checkpoints for the three iterations are made available under the **CC-BY-NC-SA-4.0** license via a protected link.
195
+
196
+ * **Downloading page:** https://download.europe.naverlabs.com/mhubert147/
197
+ * **User:** user
198
+ * **Password:** license mentioned above in bold
199
+
200
+
201
+ # Citing and Funding Information
202
+
203
+ ```
204
+ @inproceedings{boito2024mhubert,
205
+ author={Boito, Marcely Zanon and Iyer, Vivek and Lagos, Nikolaos and Besacier, Laurent and Calapodescu, Ioan},
206
+ title={{mHuBERT-147: A Compact Multilingual HuBERT Model}},
207
+ year=2024,
208
+ booktitle={Interspeech 2024},
209
+ }
210
+ ```
211
+
212
+ <img src="https://cdn-uploads.huggingface.co/production/uploads/62262e19d36494a6f743a28d/HbzC1C-uHe25ewTy2wyoK.png" width=7% height=7%>
213
+ This is an output of the European Project UTTER (Unified Transcription and Translation for Extended Reality) funded by European Union’s Horizon Europe Research and Innovation programme under grant agreement number 101070631.
214
+
215
+ For more information please visit https://he-utter.eu/
216
+
217
+ NAVER LABS Europe: https://europe.naverlabs.com/
ACE_checkpoints_3.5B/mHuBERT-147/checkpoint_best.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d88f79300fbd3dec7b0cc8ded2e3535cf09479d198a230babac835f9c274ef8
3
+ size 1138157677
ACE_checkpoints_3.5B/mHuBERT-147/config.json ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "mmHuBERT-3rd-faiss-0.7d-0.9l-1000k-850gb-2M-best",
3
+ "activation_dropout": 0.1,
4
+ "apply_spec_augment": true,
5
+ "architectures": [
6
+ "HubertModel"
7
+ ],
8
+ "attention_dropout": 0.1,
9
+ "bos_token_id": 1,
10
+ "classifier_proj_size": 256,
11
+ "conv_bias": false,
12
+ "conv_dim": [
13
+ 512,
14
+ 512,
15
+ 512,
16
+ 512,
17
+ 512,
18
+ 512,
19
+ 512
20
+ ],
21
+ "conv_kernel": [
22
+ 10,
23
+ 3,
24
+ 3,
25
+ 3,
26
+ 3,
27
+ 2,
28
+ 2
29
+ ],
30
+ "conv_stride": [
31
+ 5,
32
+ 2,
33
+ 2,
34
+ 2,
35
+ 2,
36
+ 2,
37
+ 2
38
+ ],
39
+ "ctc_loss_reduction": "sum",
40
+ "ctc_zero_infinity": false,
41
+ "do_stable_layer_norm": false,
42
+ "eos_token_id": 2,
43
+ "feat_extract_activation": "gelu",
44
+ "feat_extract_dropout": 0.0,
45
+ "feat_extract_norm": "group",
46
+ "feat_proj_dropout": 0.1,
47
+ "feat_proj_layer_norm": true,
48
+ "final_dropout": 0.1,
49
+ "gradient_checkpointing": false,
50
+ "hidden_act": "gelu",
51
+ "hidden_dropout": 0.1,
52
+ "hidden_dropout_prob": 0.1,
53
+ "hidden_size": 768,
54
+ "initializer_range": 0.02,
55
+ "intermediate_size": 3072,
56
+ "layer_norm_eps": 1e-05,
57
+ "layerdrop": 0.1,
58
+ "mask_feature_length": 10,
59
+ "mask_feature_min_masks": 0,
60
+ "mask_feature_prob": 0.0,
61
+ "mask_time_length": 10,
62
+ "mask_time_min_masks": 2,
63
+ "mask_time_prob": 0.05,
64
+ "model_type": "hubert",
65
+ "num_attention_heads": 12,
66
+ "num_conv_pos_embedding_groups": 16,
67
+ "num_conv_pos_embeddings": 128,
68
+ "num_feat_extract_layers": 7,
69
+ "num_hidden_layers": 12,
70
+ "pad_token_id": 0,
71
+ "tokenizer_class": "Wav2Vec2CTCTokenizer",
72
+ "torch_dtype": "float32",
73
+ "transformers_version": "4.30.1",
74
+ "use_weighted_layer_sum": false,
75
+ "vocab_size": 32
76
+ }
ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_1.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d448029528a38cc34b76351a24dba88f457982077cdc552eddac5a799449bbb
3
+ size 23524510
ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_10.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46f14f5e1aebc1f53945b6a0965bbd410131a659b6c4d4a963438abd1d0251a8
3
+ size 45518046
ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_11.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:925978f6d03e72252547d6555e755b864f2fbf0d325bb9c300c838193270c25e
3
+ size 70441456
ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_2.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46bc21105546d8712e569b18d2cdc0297678f9ac7e46f6cfcd208891f6de0060
3
+ size 27692393
ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_3.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5610b315716f064a528e21a1f94154a3aee81a85c54713caa144f8714841986a
3
+ size 24904693
ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_4.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:253c1125b1c3772dd85561d6401f2b1805fb2f55e36cfd7d0247dd7e0ebdf5c6
3
+ size 37878320
ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_5.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6fec48b8afe104a0e48101a828ee32a1183b2181e23784e3b551743876f0ad45
3
+ size 13098753
ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_6.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b971f9a1154f38e9c80185250546a8977dd4628b3ddc0e978938ddb96cf24e01
3
+ size 56463129
ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_7.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4df7710f3d7bf1cb5e37e617694e7d4ea52cd1bcbd0290851132713d092c1f9
3
+ size 107675610
ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_8.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79ef8010bd5f2d887e709f0178f46b2bdfb3a35d31608b7bb16edc8b1915bbe9
3
+ size 94747946
ACE_checkpoints_3.5B/mHuBERT-147/manifest/CV_9.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:705b6b829ea759b9e4da9bd31cc8b4516cd383d5f8cb8d63e4024f201bf9df4f
3
+ size 44734175
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2253dd1a6ed0a50c2812e7d8a22d38fea1f30f9798441013feef14a404cfd42f
3
+ size 87469881
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_1.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d75502d848b4579cbd368f3097f19897738a43ffedbdb352c3d88d0fa761cac
3
+ size 26389281
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_10.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3084061b1bdc29ed0d7dfc4787404c6fd9114712fa577d97f35a41afab1ad2e
3
+ size 45084545
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_11.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:172230fc7f0c1e1242041ca5f5500cca4439730190ad6c3b629241808a96d781
3
+ size 25485271
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_12.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e9b467037a1620e80e37145fb3387407ba55a206cec2f9399fb784db6638106
3
+ size 25645643
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_13.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf41eb4642a142f7eaed417225ac409f7b361347503dedede48893ba43d9fe40
3
+ size 35549313
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_14.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a66d37c6f4b64b30fde25594a73e549dcfaac43341fe85d3c0947f7549135198
3
+ size 27803101
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_15.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:21a05e582fda9a7a1bc07edb4ce4efc6169b8bf0c9c52220cc92a1623e655db7
3
+ size 31004025
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_16.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33fc13c9600c8ff097b05d3a4a401db1e4caa04af2f3545cf6bc316fe4731a5c
3
+ size 30926137
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_17.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d90000f27cc79304314f4004aa0479212c0c08ccb603c334191e4126f91aecb2
3
+ size 32022161
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_18.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57a398e8b3e99076923b79d0496bfe771049308b5ea76d3aece680d3fe9324f4
3
+ size 44275579
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_19.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:799152054e2d4ef46cdd7d350fc4f8412271508dd28b63d7d42688e1047a419a
3
+ size 21145621
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_2.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eaa2c5406ec4b454d60bec30e91496eb895efc16e69e14920eb9386bdd529bcc
3
+ size 23121823
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_20.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69fd196786681f883c9954c2dec9302e72035cb50b0c4df412c5253be740c95f
3
+ size 21617195
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_3.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a28b0a81d7be2d075f8e481979341f724b1d0b2729d7f36b342b3cb5915bf588
3
+ size 23913381
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_4.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97362b4263eb22c67bb3ba1e3a6dca6c1d731341f8f117173a342f73502a62c1
3
+ size 32341051
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_5.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef6cb8874c4ee25d67fa0527fab0aa3efc9035474a4f8850f860da3aecdbce9c
3
+ size 25007567
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_6.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3053087a4a1ecf91b00df521f75ced518e2a73666c2e01269485bebeb5eefdfc
3
+ size 20856015
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_7.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4130cb67e5b3175e204d1ccff29490712fd29d00da5192b2dfbfe3c73ce1e049
3
+ size 22289661
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_8.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:605b84699a31aadc3e8cb8c62c1434da9d5524d65ba1d18133928c773feec31d
3
+ size 42954841
ACE_checkpoints_3.5B/mHuBERT-147/manifest/MLS_9.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:82d1e2ccfd56519c3308716f82ba4ed5816900b64dd702c5b4bc389528699f5e
3
+ size 37217047
ACE_checkpoints_3.5B/mHuBERT-147/manifest/VL_1.tsv ADDED
The diff for this file is too large to render. See raw diff
 
ACE_checkpoints_3.5B/mHuBERT-147/manifest/VL_10.tsv ADDED
The diff for this file is too large to render. See raw diff