marksverdhai commited on
Commit
7e3e3c2
·
verified ·
1 Parent(s): 9f913ca

Upload merged Norwegian fine-tuned VibeVoice-7B model

Browse files
README.md ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: other
3
+ license_name: vibevoice-community
4
+ license_link: https://huggingface.co/vibevoice/VibeVoice-7B/blob/main/LICENSE
5
+ base_model: vibevoice/VibeVoice-7B
6
+ tags:
7
+ - tts
8
+ - text-to-speech
9
+ - speech-synthesis
10
+ - norwegian
11
+ - bokmal
12
+ - nynorsk
13
+ language:
14
+ - "no"
15
+ - nb
16
+ - nn
17
+ ---
18
+
19
+ # VibeVoice 7B Norwegian
20
+
21
+ This is a Norwegian fine-tuned version of [VibeVoice-7B](https://huggingface.co/vibevoice/VibeVoice-7B), a state-of-the-art text-to-speech model.
22
+
23
+ ## Training Details
24
+
25
+ This model was trained using a progressive 3-stage fine-tuning approach:
26
+
27
+ 1. **Stage 1**: Initial Norwegian (Bokmål) training on Mozilla Common Voice
28
+ 2. **Stage 2**: Continued training on broader Norwegian data
29
+ 3. **Stage 3**: Dialect-specific fine-tuning for Østnorsk/Oslo dialect
30
+
31
+ The LoRA adapter was merged into the base model weights to create this standalone fine-tuned model.
32
+
33
+ ### Training Configuration
34
+ - LoRA rank: 32
35
+ - LoRA alpha: 128
36
+ - Target modules: q_proj, k_proj, v_proj, o_proj, gate_proj, up_proj, down_proj
37
+ - Diffusion head: Fully trained
38
+ - Precision: bfloat16
39
+
40
+ ## Usage
41
+
42
+ ```python
43
+ from transformers import AutoProcessor, AutoModel
44
+ import torch
45
+
46
+ processor = AutoProcessor.from_pretrained("heiertech/vibevoice-7b-norwegian")
47
+ model = AutoModel.from_pretrained("heiertech/vibevoice-7b-norwegian", torch_dtype=torch.bfloat16)
48
+
49
+ # Generate speech
50
+ text = "Hei, dette er en test av den norske stemmen."
51
+ inputs = processor(text=text, return_tensors="pt")
52
+ outputs = model.generate(**inputs)
53
+ ```
54
+
55
+ ## License
56
+
57
+ This model inherits the license from the base VibeVoice-7B model. Please see the [original license](https://huggingface.co/vibevoice/VibeVoice-7B/blob/main/LICENSE) for details.
58
+
59
+ ## Acknowledgments
60
+
61
+ - Base model: [vibevoice/VibeVoice-7B](https://huggingface.co/vibevoice/VibeVoice-7B)
62
+ - Training data: Mozilla Common Voice Norwegian
config.json ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_attn_implementation_autoset": false,
3
+ "acostic_vae_dim": 64,
4
+ "acoustic_tokenizer_config": {
5
+ "causal": true,
6
+ "channels": 1,
7
+ "conv_bias": true,
8
+ "conv_norm": "none",
9
+ "corpus_normalize": 0.0,
10
+ "decoder_depths": null,
11
+ "decoder_n_filters": 32,
12
+ "decoder_ratios": [
13
+ 8,
14
+ 5,
15
+ 5,
16
+ 4,
17
+ 2,
18
+ 2
19
+ ],
20
+ "disable_last_norm": true,
21
+ "dtype": "bfloat16",
22
+ "encoder_depths": "3-3-3-3-3-3-8",
23
+ "encoder_n_filters": 32,
24
+ "encoder_ratios": [
25
+ 8,
26
+ 5,
27
+ 5,
28
+ 4,
29
+ 2,
30
+ 2
31
+ ],
32
+ "fix_std": 0.5,
33
+ "layer_scale_init_value": 1e-06,
34
+ "layernorm": "RMSNorm",
35
+ "layernorm_elementwise_affine": true,
36
+ "layernorm_eps": 1e-05,
37
+ "mixer_layer": "depthwise_conv",
38
+ "model_type": "vibevoice_acoustic_tokenizer",
39
+ "pad_mode": "constant",
40
+ "std_dist_type": "gaussian",
41
+ "vae_dim": 64,
42
+ "weight_init_value": 0.01
43
+ },
44
+ "acoustic_vae_dim": 64,
45
+ "architectures": [
46
+ "VibeVoiceForConditionalGeneration"
47
+ ],
48
+ "decoder_config": {
49
+ "attention_dropout": 0.0,
50
+ "dtype": "bfloat16",
51
+ "hidden_act": "silu",
52
+ "hidden_size": 3584,
53
+ "initializer_range": 0.02,
54
+ "intermediate_size": 18944,
55
+ "layer_types": [
56
+ "full_attention",
57
+ "full_attention",
58
+ "full_attention",
59
+ "full_attention",
60
+ "full_attention",
61
+ "full_attention",
62
+ "full_attention",
63
+ "full_attention",
64
+ "full_attention",
65
+ "full_attention",
66
+ "full_attention",
67
+ "full_attention",
68
+ "full_attention",
69
+ "full_attention",
70
+ "full_attention",
71
+ "full_attention",
72
+ "full_attention",
73
+ "full_attention",
74
+ "full_attention",
75
+ "full_attention",
76
+ "full_attention",
77
+ "full_attention",
78
+ "full_attention",
79
+ "full_attention",
80
+ "full_attention",
81
+ "full_attention",
82
+ "full_attention",
83
+ "full_attention"
84
+ ],
85
+ "max_position_embeddings": 32768,
86
+ "max_window_layers": 28,
87
+ "model_type": "qwen2",
88
+ "num_attention_heads": 28,
89
+ "num_hidden_layers": 28,
90
+ "num_key_value_heads": 4,
91
+ "rms_norm_eps": 1e-06,
92
+ "rope_scaling": null,
93
+ "rope_theta": 1000000.0,
94
+ "sliding_window": null,
95
+ "use_cache": true,
96
+ "use_mrope": false,
97
+ "use_sliding_window": false,
98
+ "vocab_size": 152064
99
+ },
100
+ "diffusion_head_config": {
101
+ "ddpm_batch_mul": 4,
102
+ "ddpm_beta_schedule": "cosine",
103
+ "ddpm_num_inference_steps": 20,
104
+ "ddpm_num_steps": 1000,
105
+ "diffusion_type": "ddpm",
106
+ "dtype": "bfloat16",
107
+ "head_ffn_ratio": 3.0,
108
+ "head_layers": 4,
109
+ "hidden_size": 3584,
110
+ "latent_size": 64,
111
+ "model_type": "vibevoice_diffusion_head",
112
+ "prediction_type": "v_prediction",
113
+ "rms_norm_eps": 1e-05,
114
+ "speech_vae_dim": 64
115
+ },
116
+ "dtype": "bfloat16",
117
+ "model_type": "vibevoice",
118
+ "semantic_tokenizer_config": {
119
+ "causal": true,
120
+ "channels": 1,
121
+ "conv_bias": true,
122
+ "conv_norm": "none",
123
+ "corpus_normalize": 0.0,
124
+ "disable_last_norm": true,
125
+ "dtype": "bfloat16",
126
+ "encoder_depths": "3-3-3-3-3-3-8",
127
+ "encoder_n_filters": 32,
128
+ "encoder_ratios": [
129
+ 8,
130
+ 5,
131
+ 5,
132
+ 4,
133
+ 2,
134
+ 2
135
+ ],
136
+ "fix_std": 0,
137
+ "layer_scale_init_value": 1e-06,
138
+ "layernorm": "RMSNorm",
139
+ "layernorm_elementwise_affine": true,
140
+ "layernorm_eps": 1e-05,
141
+ "mixer_layer": "depthwise_conv",
142
+ "model_type": "vibevoice_semantic_tokenizer",
143
+ "pad_mode": "constant",
144
+ "std_dist_type": "none",
145
+ "vae_dim": 128,
146
+ "weight_init_value": 0.01
147
+ },
148
+ "semantic_vae_dim": 128,
149
+ "tie_word_embeddings": false,
150
+ "transformers_version": "4.57.3"
151
+ }
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb423717f01ba6aa890e122cf7f662f71a8a4ea0431f218aa62a1f7174b2b6fd
3
+ size 4877662532
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71d015dcc94e9d00cc4b4bafc57b567768bd723acab834e395364c21533ffbff
3
+ size 4932752840
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8489291ef93f10bcbd2042df9bba572e1d60972b3c99655c99a796c5ff3753b
3
+ size 4982901128
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12575fd8c03051efa93e5f14740b5bf80e43de2f4d7fce4b21ed3f24f9d35ea3
3
+ size 3893553730
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "processor_class": "VibeVoiceProcessor",
3
+ "speech_tok_compress_ratio": 3200,
4
+ "db_normalize": true,
5
+ "audio_processor": {
6
+ "feature_extractor_type": "VibeVoiceTokenizerProcessor",
7
+ "sampling_rate": 24000,
8
+ "normalize_audio": true,
9
+ "target_dB_FS": -25,
10
+ "eps": 1e-06
11
+ }
12
+ }