import os
import random
import numpy as np
import soundfile as sf
from transformers import Qwen2_5OmniForConditionalGeneration, Qwen2_5OmniProcessor
from qwen_omni_utils import process_mm_info


# Load the Qwen2.5-Omni model and its processor from a local checkpoint.
model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
    "/data/liyangzhuo/models/Qwen2.5-Omni-7B",
    torch_dtype="auto",
    device_map="auto",
)
processor = Qwen2_5OmniProcessor.from_pretrained(
    "/data/liyangzhuo/models/Qwen2.5-Omni-7B"
)
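
# Optional (hedged): the official Qwen2.5-Omni examples also show loading with
# an explicit dtype and FlashAttention-2 when that backend is installed:
# import torch
# model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
#     "/data/liyangzhuo/models/Qwen2.5-Omni-7B",
#     torch_dtype=torch.bfloat16,
#     device_map="auto",
#     attn_implementation="flash_attention_2",
# )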

# Question audio is read from base_input_dir; combined clips are written
# to base_output_dir.
base_input_dir = "/data/liyangzhuo/test/question"
base_output_dir = "/data/liyangzhuo/test/temperature"

# No video inputs are used in this script.
USE_AUDIO_IN_VIDEO = False

# Make sure the output directory exists.
os.makedirs(base_output_dir, exist_ok=True)

# Collect every .wav file in the input directory.
audio_files = []
for filename in os.listdir(base_input_dir):
    if filename.endswith('.wav'):
        audio_files.append(os.path.join(base_input_dir, filename))

# Randomly pick at most 20 files to process.
selected_files = random.sample(audio_files, min(20, len(audio_files)))
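# Note: random.sample is unseeded, so every run picks a different subset.
# For a reproducible selection, seed the RNG first, e.g. random.seed(42)
# (42 is just an example value, not from the original script).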

# Sampling temperature applied to every generation below.
temperature = 0.7

# Process each selected file: read the question, then sample several replies.
for idx, input_file in enumerate(selected_files, start=1):
    print(f"Processing file {idx}: {input_file}")

    # Read the question audio.
    input_audio, sr = sf.read(input_file)
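
    # Guard (an addition, hedged): sf.read returns a (frames, channels) array
    # for multichannel files, while the resampling and concatenation below
    # expect a 1-D mono signal, so average the channels down to one.
    if input_audio.ndim > 1:
        input_audio = input_audio.mean(axis=1)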

    # Resample the question to 24 kHz to match the rate the clip is saved at.
    if sr != 24000:
        import librosa  # lazy import: only needed when resampling
        input_audio = librosa.resample(input_audio, orig_sr=sr, target_sr=24000)
        sr = 24000

    # Two seconds of silence (at 24 kHz) to separate question and reply.
    silence = np.zeros(24000 * 2)

    # Generate four independent replies for the same question.
    for reply_idx in range(1, 5):
        print(f"  Generating reply {reply_idx}...")

        # Build the conversation; this is the system prompt the Qwen2.5-Omni
        # documentation specifies for speech output.
        conversation = [
            {
                "role": "system",
                "content": [
                    {"type": "text", "text": "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech."}
                ],
            },
            {
                "role": "user",
                "content": [
                    {"type": "audio", "audio": input_file},
                ],
            },
        ]
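
        # Variant (hedged): the chat format also accepts a text instruction in
        # the same user turn alongside the audio, e.g.
        # {"type": "text", "text": "Please answer the question in the audio."}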

        # Render the chat template and pack all modalities into model inputs.
        text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
        audios, images, videos = process_mm_info(conversation, use_audio_in_video=USE_AUDIO_IN_VIDEO)
        inputs = processor(text=text, audio=audios, images=images, videos=videos, return_tensors="pt", padding=True, use_audio_in_video=USE_AUDIO_IN_VIDEO)
        inputs = inputs.to(model.device).to(model.dtype)

        # Sample a reply; with audio output enabled, generate returns both the
        # text token IDs and the synthesized waveform.
        text_ids, audio = model.generate(
            **inputs,
            use_audio_in_video=USE_AUDIO_IN_VIDEO,
            temperature=temperature,
            do_sample=True,
        )
        full_text = processor.batch_decode(
            text_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
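
        # Variants (hedged, from the official examples): generate also accepts
        # speaker="Chelsie" or speaker="Ethan" to choose a voice, and
        # return_audio=False to skip speech synthesis and return only text IDs.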

        # Keep only the assistant's turn from the decoded transcript.
        generated_text = full_text[0]
        if "assistant" in generated_text:
            model_prediction = generated_text.split("assistant", 1)[1].strip()
        else:
            model_prediction = generated_text.strip()
        print(f"  Reply text: {model_prediction}")

        # Move the generated waveform to the CPU as a flat numpy array.
        generated_audio = audio.reshape(-1).detach().cpu().numpy()

        # Stitch question, silent gap, and reply into a single clip.
        combined_audio = np.concatenate([input_audio, silence, generated_audio])

        output_filename = f"qwen_{idx:04d}_{reply_idx}.wav"
        output_path = os.path.join(base_output_dir, output_filename)

        # The talker's waveform is 24 kHz, matching the resampled question.
        sf.write(
            output_path,
            combined_audio,
            samplerate=24000,
        )

        print(f"  Saved to {output_path}")

    print(f"Finished file {idx}\n")


print("All files processed!")