debi1234 committed on
Commit
266830e
·
verified ·
1 Parent(s): 37c1a89

Upload 8-bit quantized VibeVoice-Large

Browse files
config.json ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "acostic_vae_dim": 64,
3
+ "acoustic_tokenizer_config": {
4
+ "causal": true,
5
+ "channels": 1,
6
+ "conv_bias": true,
7
+ "conv_norm": "none",
8
+ "corpus_normalize": 0.0,
9
+ "decoder_depths": null,
10
+ "decoder_n_filters": 32,
11
+ "decoder_ratios": [
12
+ 8,
13
+ 5,
14
+ 5,
15
+ 4,
16
+ 2,
17
+ 2
18
+ ],
19
+ "disable_last_norm": true,
20
+ "encoder_depths": "3-3-3-3-3-3-8",
21
+ "encoder_n_filters": 32,
22
+ "encoder_ratios": [
23
+ 8,
24
+ 5,
25
+ 5,
26
+ 4,
27
+ 2,
28
+ 2
29
+ ],
30
+ "fix_std": 0.5,
31
+ "layer_scale_init_value": 1e-06,
32
+ "layernorm": "RMSNorm",
33
+ "layernorm_elementwise_affine": true,
34
+ "layernorm_eps": 1e-05,
35
+ "mixer_layer": "depthwise_conv",
36
+ "model_type": "vibevoice_acoustic_tokenizer",
37
+ "pad_mode": "constant",
38
+ "std_dist_type": "gaussian",
39
+ "vae_dim": 64,
40
+ "weight_init_value": 0.01
41
+ },
42
+ "architectures": [
43
+ "VibeVoiceForConditionalGeneration"
44
+ ],
45
+ "decoder_config": {
46
+ "attention_dropout": 0.0,
47
+ "hidden_act": "silu",
48
+ "hidden_size": 3584,
49
+ "initializer_range": 0.02,
50
+ "intermediate_size": 18944,
51
+ "max_position_embeddings": 32768,
52
+ "max_window_layers": 28,
53
+ "model_type": "qwen2",
54
+ "num_attention_heads": 28,
55
+ "num_hidden_layers": 28,
56
+ "num_key_value_heads": 4,
57
+ "rms_norm_eps": 1e-06,
58
+ "rope_scaling": null,
59
+ "rope_theta": 1000000.0,
60
+ "sliding_window": null,
61
+ "torch_dtype": "bfloat16",
62
+ "use_cache": true,
63
+ "use_mrope": false,
64
+ "use_sliding_window": false,
65
+ "vocab_size": 152064
66
+ },
67
+ "diffusion_head_config": {
68
+ "ddpm_batch_mul": 4,
69
+ "ddpm_beta_schedule": "cosine",
70
+ "ddpm_num_inference_steps": 20,
71
+ "ddpm_num_steps": 1000,
72
+ "diffusion_type": "ddpm",
73
+ "head_ffn_ratio": 3.0,
74
+ "head_layers": 4,
75
+ "hidden_size": 3584,
76
+ "latent_size": 64,
77
+ "model_type": "vibevoice_diffusion_head",
78
+ "prediction_type": "v_prediction",
79
+ "rms_norm_eps": 1e-05,
80
+ "speech_vae_dim": 64
81
+ },
82
+ "model_type": "vibevoice",
83
+ "semantic_tokenizer_config": {
84
+ "causal": true,
85
+ "channels": 1,
86
+ "conv_bias": true,
87
+ "conv_norm": "none",
88
+ "corpus_normalize": 0.0,
89
+ "disable_last_norm": true,
90
+ "encoder_depths": "3-3-3-3-3-3-8",
91
+ "encoder_n_filters": 32,
92
+ "encoder_ratios": [
93
+ 8,
94
+ 5,
95
+ 5,
96
+ 4,
97
+ 2,
98
+ 2
99
+ ],
100
+ "fix_std": 0,
101
+ "layer_scale_init_value": 1e-06,
102
+ "layernorm": "RMSNorm",
103
+ "layernorm_elementwise_affine": true,
104
+ "layernorm_eps": 1e-05,
105
+ "mixer_layer": "depthwise_conv",
106
+ "model_type": "vibevoice_semantic_tokenizer",
107
+ "pad_mode": "constant",
108
+ "std_dist_type": "none",
109
+ "vae_dim": 128,
110
+ "weight_init_value": 0.01
111
+ },
112
+ "semantic_vae_dim": 128,
113
+ "tie_word_embeddings": false,
114
+ "torch_dtype": "bfloat16",
115
+ "transformers_version": "4.51.3",
116
+ "quantization_config": {
117
+ "quant_method": "bitsandbytes",
118
+ "_load_in_8bit": true,
119
+ "_load_in_4bit": false,
120
+ "llm_int8_threshold": 6.0,
121
+ "llm_int8_skip_modules": null,
122
+ "llm_int8_enable_fp32_cpu_offload": false,
123
+ "llm_int8_has_fp16_weight": false,
124
+ "bnb_4bit_quant_type": "fp4",
125
+ "bnb_4bit_use_double_quant": false,
126
+ "bnb_4bit_compute_dtype": "float32",
127
+ "bnb_4bit_quant_storage": "uint8",
128
+ "load_in_4bit": false,
129
+ "load_in_8bit": true
130
+ },
131
+ "_quantization_method": "bitsandbytes"
132
+ }
generation_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "transformers_version": "4.52.4"
4
+ }
load_quantized_8bit.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ """
3
+ Load and use the 8-bit quantized VibeVoice model
4
+ """
5
+
6
+ import torch
7
+ from transformers import BitsAndBytesConfig
8
+ from vibevoice.modular.modeling_vibevoice_inference import VibeVoiceForConditionalGenerationInference
9
+ from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
10
+
11
def load_quantized_model(model_path="quantized_8bit"):
    """Load the pre-quantized 8-bit VibeVoice model and its processor.

    Args:
        model_path: Directory containing the quantized checkpoint
            (weights, config.json with the bitsandbytes quantization
            section, and the processor files).

    Returns:
        Tuple of (model, processor): the model in eval mode on CUDA and
        the matching VibeVoiceProcessor.
    """
    print("Loading 8-bit quantized VibeVoice model...")

    # The checkpoint is already quantized; this config just tells
    # from_pretrained to load the int8 weights correctly.
    # NOTE: BitsAndBytesConfig has no `bnb_8bit_compute_dtype` parameter —
    # compute-dtype options exist only for the 4-bit path
    # (`bnb_4bit_compute_dtype`) — so only `load_in_8bit` is set here.
    bnb_config = BitsAndBytesConfig(load_in_8bit=True)

    # Load processor (tokenizer + audio feature extraction).
    processor = VibeVoiceProcessor.from_pretrained(model_path)

    # Load model onto the GPU with the int8 weights.
    model = VibeVoiceForConditionalGenerationInference.from_pretrained(
        model_path,
        quantization_config=bnb_config,
        device_map='cuda',
        torch_dtype=torch.bfloat16,
    )
    model.eval()

    print("✅ Model loaded successfully!")
    print(f"💾 Memory usage: {torch.cuda.memory_allocated() / 1e9:.1f} GB")

    return model, processor
42
+
43
+ # Example usage
44
+ if __name__ == "__main__":
45
+ model, processor = load_quantized_model()
46
+
47
+ # Generate audio
48
+ text = "Speaker 1: Hello! Speaker 2: Hi there!"
49
+ inputs = processor(
50
+ text=[text],
51
+ voice_samples=[["path/to/voice1.wav", "path/to/voice2.wav"]],
52
+ padding=True,
53
+ return_tensors="pt",
54
+ )
55
+
56
+ with torch.no_grad():
57
+ outputs = model.generate(**inputs)
58
+
59
+ # Save audio
60
+ processor.save_audio(outputs.speech_outputs[0], "output.wav")
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68f98075dac463766219e6e61ff5fe9ab969f8fea621a65906f1d6793f2eaf72
3
+ size 4987685394
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48940fb59366de226af5df46020f022d4d651f4563f190142c175b5bf733e9c7
3
+ size 4489976774
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d83c0514c0c9d2675cb4d51ee56b12515ea45770ce35acc5ab0ec4bc7d1bef73
3
+ size 1089994880
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "processor_class": "VibeVoiceProcessor",
3
+ "speech_tok_compress_ratio": 3200,
4
+ "db_normalize": true,
5
+ "audio_processor": {
6
+ "feature_extractor_type": "VibeVoiceTokenizerProcessor",
7
+ "sampling_rate": 24000,
8
+ "normalize_audio": true,
9
+ "target_dB_FS": -25,
10
+ "eps": 1e-06
11
+ }
12
+ }
quantization_config.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "quantization_config": {
3
+ "quant_method": "bitsandbytes",
4
+ "_load_in_8bit": true,
5
+ "_load_in_4bit": false,
6
+ "llm_int8_threshold": 6.0,
7
+ "llm_int8_skip_modules": null,
8
+ "llm_int8_enable_fp32_cpu_offload": false,
9
+ "llm_int8_has_fp16_weight": false,
10
+ "bnb_4bit_quant_type": "fp4",
11
+ "bnb_4bit_use_double_quant": false,
12
+ "bnb_4bit_compute_dtype": "float32",
13
+ "bnb_4bit_quant_storage": "uint8",
14
+ "load_in_4bit": false,
15
+ "load_in_8bit": true
16
+ },
17
+ "quantization_method": "bitsandbytes",
18
+ "bits": 8,
19
+ "quant_type": "nf4"
20
+ }