ylgatatooine commited on
Commit
b607631
·
verified ·
1 Parent(s): 2aa66aa

Upload 10 files

Browse files
Files changed (10) hide show
  1. README.md +62 -3
  2. __init__.py +0 -0
  3. config.json +11 -0
  4. configuration_bigcodec.py +19 -0
  5. modeling_xcodec2.py +165 -0
  6. module.py +0 -0
  7. pytorch_model.bin +3 -0
  8. reconstructed.wav +0 -0
  9. test.flac +0 -0
  10. test.py +21 -0
README.md CHANGED
@@ -1,3 +1,62 @@
1
- ---
2
- license: cc-by-nc-nd-4.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: cc-by-4.0
3
+ tags:
4
+ - audio-to-audio
5
+ pipeline_tag: audio-to-audio
6
+ ---
7
+
8
+
9
+ ## Paper
10
+ LLaSA: Scaling Train Time and Test Time Compute for LLaMA-based Speech Synthesis (Coming soon)
11
+
12
+ Codec Does Matter: Exploring the Semantic Shortcoming of Codec for Audio Language Model (AAAI 2025, xcodec 1.0)
13
+
14
+
15
+ # Getting Started with XCodec2 on Hugging Face
16
+ XCodec2 is a speech tokenizer that offers the following key features:
17
+
18
+ 1. **Single Vector Quantization**
19
+ 2. **50 Tokens per Second**
20
+ 3. **Multilingual Speech Semantic Support and High-Quality Speech Reconstruction**
21
+
22
+
23
+ To use `xcodec2`, ensure you have it installed. You can install it using the following command:
24
+
25
+ ```bash
26
+ conda create -n xcodec2 python=3.9
27
+ conda activate xcodec2
28
+ pip install xcodec2==0.1.3  # fixes a bug in the previous version to achieve better sound quality
29
+ ```
30
+ Then,
31
+ ```python
32
+ import torch
33
+ import soundfile as sf
34
+ from transformers import AutoConfig
35
+
36
+
37
+ from xcodec2.modeling_xcodec2 import XCodec2Model
38
+
39
+ model_path = "HKUST-Audio/xcodec2"
40
+
41
+ model = XCodec2Model.from_pretrained(model_path)
42
+ model.eval().cuda()
43
+
44
+
45
+ wav, sr = sf.read("test.wav")
46
+ wav_tensor = torch.from_numpy(wav).float().unsqueeze(0) # Shape: (1, T)
47
+
48
+
49
+ with torch.no_grad():
50
+ # Only 16khz speech
51
+ # Only supports single input. For batch inference, please refer to the link below.
52
+ vq_code = model.encode_code(input_waveform=wav_tensor)
53
+ print("Code:", vq_code )
54
+
55
+ recon_wav = model.decode_code(vq_code).cpu() # Shape: (1, 1, T')
56
+
57
+
58
+ sf.write("reconstructed.wav", recon_wav[0, 0, :].numpy(), sr)
59
+ print("Done! Check reconstructed.wav")
60
+ ```
61
+
62
+ # If you want to train your own xcodec2, batch inference, or large-scale code extraction, the code is released [here](https://github.com/zhenye234/X-Codec-2.0).
__init__.py ADDED
File without changes
config.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_type": "xcodec2",
3
+ "semantic_hidden_size": 1024,
4
+ "codec_encoder_hidden_size": 1024,
5
+ "codec_decoder_hidden_size": 1024,
6
+ "use_vocos": true,
7
+ "architectures": [
8
+ "XCodec2Model"
9
+ ]
10
+ }
11
+
configuration_bigcodec.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import PretrainedConfig
2
+
3
class BigCodecConfig(PretrainedConfig):
    """Hugging Face-style configuration for the BigCodec / XCodec2 model.

    Holds the hidden sizes of the semantic encoder and of the codec
    encoder/decoder, plus a flag selecting the Vocos decoder variant.
    Defaults mirror the values shipped in ``config.json``.
    """

    model_type = "bigcodec"

    def __init__(
        self,
        # NOTE: these are just example hyper-parameters
        # (translated from the original Chinese comment).
        semantic_hidden_size=1024,
        codec_encoder_hidden_size=1024,
        codec_decoder_hidden_size=1024,
        use_vocos=True,
        **kwargs
    ):
        # Remaining kwargs (e.g. "architectures") are handled by the base class.
        super().__init__(**kwargs)
        self.semantic_hidden_size = semantic_hidden_size
        self.codec_encoder_hidden_size = codec_encoder_hidden_size
        self.codec_decoder_hidden_size = codec_decoder_hidden_size
        self.use_vocos = use_vocos
modeling_xcodec2.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ from transformers import PreTrainedModel
4
+ from configuration_bigcodec import BigCodecConfig
5
+
6
+ # 请确保这些模块路径是正确的
7
+ from vq.codec_encoder import CodecEncoder_Transformer
8
+ from vq.codec_decoder_vocos import CodecDecoderVocos
9
+ from vq.module import SemanticEncoder
10
+ from transformers import AutoFeatureExtractor, Wav2Vec2BertModel
11
+
12
class XCodec2Model(PreTrainedModel):
    """Single-codebook speech tokenizer (XCodec2).

    Fuses semantic features from a frozen Wav2Vec2-BERT model with
    acoustic features from a transformer codec encoder, projects the
    concatenation, vector-quantizes it with the decoder's quantizer, and
    decodes the codes back to a waveform.  Only 16 kHz speech is
    supported (see ``forward``/``encode_code``).
    """

    config_class = BigCodecConfig

    def __init__(self, config: BigCodecConfig):
        super().__init__(config)

        # 1) Semantic model — weights pulled from the Hub, kept in eval mode.
        self.semantic_model = Wav2Vec2BertModel.from_pretrained(
            "facebook/w2v-bert-2.0",
            output_hidden_states=True
        )
        self.semantic_model.eval()

        self.SemanticEncoder_module = SemanticEncoder(
            config.semantic_hidden_size,
            config.semantic_hidden_size,
            config.semantic_hidden_size
        )

        # 2) Codec encoder (acoustic branch)
        self.CodecEnc = CodecEncoder_Transformer()

        # 3) Codec decoder — also owns the vector quantizer used below.
        self.generator = CodecDecoderVocos()

        # 4) Two fully-connected projections: pre-quantizer (2048 -> 2048)
        #    and post-quantizer (2048 -> 1024).
        self.fc_prior = nn.Linear(2048, 2048)
        self.fc_post_a = nn.Linear(2048, 1024)
        feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/w2v-bert-2.0")
        self.feature_extractor = feature_extractor

    def forward(self, input_waveform, sample_rate=16000):
        """
        Full encode -> quantize -> decode round trip.

        (Translated from the original note: this logic does not have to
        be called ``forward`` and could be split into other methods, but
        keeping the core logic in ``forward`` keeps the model
        pipeline-compatible.)

        Args:
            input_waveform: [batch_size, waveform_length]
            sample_rate: defaults to 16000
        Returns:
            The reconstructed speech audio (Tensor).
        """
        # 1) Feature extraction
        # (padding could be applied here if needed)
        input_features = self.feature_extractor(
            input_waveform,
            sampling_rate=sample_rate,
            return_tensors="pt"
        ).input_features.to(self.device)  # [batch, frames, feat_dim]

        # 2) Semantic branch
        semantic_output = self.semantic_model(input_features)
        semantic_hidden_16 = semantic_output.hidden_states[16]  # take layer 16
        semantic_hidden_16 = semantic_hidden_16.transpose(1, 2)  # [batch, hidden_dim, frames]
        semantic_encoded = self.SemanticEncoder_module(semantic_hidden_16)

        # 3) Codec encoder (acoustic branch)
        wav = input_waveform.unsqueeze(1).to(self.device)  # shape: [batch, 1, time]
        vq_emb = self.CodecEnc(wav)  # [batch, time//down, 1024] — illustrative shape only
        vq_emb = vq_emb.transpose(1, 2)  # -> [batch, 1024, frames]

        # Align the two branches' frame counts. Illustrative handling only —
        # a real implementation might align dimensions properly first.
        if vq_emb.shape[-1] != semantic_encoded.shape[-1]:
            # Simple truncation (zero-padding would also work); original
            # author notes the choice is up to the integrator.
            min_len = min(vq_emb.shape[-1], semantic_encoded.shape[-1])
            vq_emb = vq_emb[:, :, :min_len]
            semantic_encoded = semantic_encoded[:, :, :min_len]

        # 4) Concatenate semantic + acoustic features
        concat_emb = torch.cat([semantic_encoded, vq_emb], dim=1)  # [batch, 1024 + 1024, frames]

        # 5) fc_prior (applied per-frame, hence the transposes)
        concat_emb = self.fc_prior(concat_emb.transpose(1, 2)).transpose(1, 2)

        # 6) Quantization step of the decoder
        _, vq_code, _ = self.generator(concat_emb, vq=True)
        vq_post_emb = self.generator.quantizer.get_output_from_indices(vq_code.transpose(1, 2))
        vq_post_emb = vq_post_emb.transpose(1, 2)

        # 7) fc_post_a
        vq_post_emb = self.fc_post_a(vq_post_emb.transpose(1, 2)).transpose(1, 2)

        # 8) Finally decode back to a waveform
        recon_audio = self.generator(vq_post_emb.transpose(1, 2), vq=False)[0]
        # recon_audio: [batch, time]
        return recon_audio

    def encode_code(self, input_waveform, sample_rate=16000):
        """
        Encode input audio into a discrete code representation.

        Args:
            input_waveform: [batch_size, waveform_length]
            sample_rate: defaults to 16000
        Returns:
            The encoded codes (Tensor).
        """
        with torch.no_grad():
            # 1) Feature extraction
            input_features = self.feature_extractor(
                input_waveform,
                sampling_rate=sample_rate,
                return_tensors="pt"
            ).input_features.to(self.device)  # [batch, frames, feat_dim]

            # 2) Semantic branch
            semantic_output = self.semantic_model(input_features)
            semantic_hidden_16 = semantic_output.hidden_states[16]  # take layer 16
            semantic_hidden_16 = semantic_hidden_16.transpose(1, 2)  # [batch, hidden_dim, frames]
            semantic_encoded = self.SemanticEncoder_module(semantic_hidden_16)

            # 3) Codec encoder (acoustic branch)
            wav = input_waveform.unsqueeze(1).to(self.device)  # shape: [batch, 1, time]
            vq_emb = self.CodecEnc(wav)  # [batch, time//down, 1024] — illustrative shape only
            vq_emb = vq_emb.transpose(1, 2)  # -> [batch, 1024, frames]

            # Align frame counts between the branches (simple truncation;
            # illustrative handling only — see forward()).
            if vq_emb.shape[-1] != semantic_encoded.shape[-1]:
                min_len = min(vq_emb.shape[-1], semantic_encoded.shape[-1])
                vq_emb = vq_emb[:, :, :min_len]
                semantic_encoded = semantic_encoded[:, :, :min_len]

            # 4) Concatenate
            concat_emb = torch.cat([semantic_encoded, vq_emb], dim=1)  # [batch, 2048, frames]

            # 5) fc_prior
            concat_emb = self.fc_prior(concat_emb.transpose(1, 2)).transpose(1, 2)

            # 6) Quantization step of the decoder — yields the code indices
            _, vq_code, _ = self.generator(concat_emb, vq=True)
            # vq_code: [batch, frames]
            return vq_code

    def decode_code(self, vq_code):
        """
        Decode previously encoded codes back into audio.

        Args:
            vq_code: the encoded codes (Tensor) [batch, frames]
        Returns:
            The decoded audio (Tensor) [batch, waveform_length]
        """
        with torch.no_grad():
            # Look up the quantized embeddings for the code indices
            vq_post_emb = self.generator.quantizer.get_output_from_indices(vq_code.transpose(1, 2))
            vq_post_emb = vq_post_emb.transpose(1, 2)  # [batch, 1024, frames]

            # 7) fc_post_a (numbering continues the pipeline in forward())
            vq_post_emb = self.fc_post_a(vq_post_emb.transpose(1, 2)).transpose(1, 2)  # [batch, 1024, frames]

            # 8) Finally decode into a waveform
            recon_audio = self.generator(vq_post_emb.transpose(1, 2), vq=False)[0]  # [batch, time]
            return recon_audio
module.py ADDED
File without changes
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8cb939062f3930e56ff22082f49c95461aedc8ceade7ff7b16a1b10f1e92e0be
3
+ size 3291343655
reconstructed.wav ADDED
Binary file (157 kB). View file
 
test.flac ADDED
Binary file (97.2 kB). View file
 
test.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Smoke test: round-trip ``test.flac`` through XCodec2 (encode -> decode)
and write the reconstruction to ``reconstructed.wav``. Requires a CUDA GPU."""
import torch
import soundfile as sf
from transformers import AutoConfig

from modeling_xcodec2 import XCodec2Model

model_path = "/data/zheny/xcodec2"  # your repository name/path on Hugging Face

model = XCodec2Model.from_pretrained(model_path)
model.eval().cuda()

# Prepare a piece of audio (the model expects 16 kHz mono speech).
wav, sr = sf.read("test.flac")
wav_tensor = torch.from_numpy(wav).float().unsqueeze(0)  # [1, time]

with torch.no_grad():
    vq_code = model.encode_code(input_waveform=wav_tensor )
    print(vq_code)
    # decode_code returns [batch, channels, time]; move to CPU for saving.
    recon_wav = model.decode_code(vq_code).cpu()

sf.write("reconstructed.wav", recon_wav[0,0,:].numpy(), sr)