# a800 / sh / qwen_temperature.py
# Uploaded by sgshdgdhsdg ("Upload folder sh", commit 978215a, verified).
import os
import random
import numpy as np
import soundfile as sf
from transformers import Qwen2_5OmniForConditionalGeneration, Qwen2_5OmniProcessor
from qwen_omni_utils import process_mm_info
# Load the Qwen2.5-Omni model and its matching processor from a local
# checkpoint. device_map="auto" places/shards the weights automatically;
# torch_dtype="auto" keeps the checkpoint's native dtype.
_CHECKPOINT = "/data/liyangzhuo/models/Qwen2.5-Omni-7B"

model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
    _CHECKPOINT,
    torch_dtype="auto",
    device_map="auto",
)
processor = Qwen2_5OmniProcessor.from_pretrained(_CHECKPOINT)
# Directory layout: question wavs are read from base_input_dir and the
# generated "question + silence + reply" clips are written to base_output_dir.
base_input_dir = "/data/liyangzhuo/test/question"
base_output_dir = "/data/liyangzhuo/test/temperature"
# Whether to extract the audio track from video inputs (irrelevant here —
# the inputs are plain wav files — but the flag is threaded through the
# processor and generate calls below).
USE_AUDIO_IN_VIDEO = False
# Make sure the output directory exists before any file is written.
os.makedirs(base_output_dir, exist_ok=True)
# Collect every .wav file in the input directory as a full path.
audio_files = [
    os.path.join(base_input_dir, name)
    for name in os.listdir(base_input_dir)
    if name.endswith(".wav")
]

# Randomly pick 20 clips (or all of them when fewer than 20 exist).
selected_files = random.sample(audio_files, min(20, len(audio_files)))

# Sampling temperature passed to model.generate.
temperature = 0.7
# Target sample rate: inputs are resampled to 24 kHz before concatenation
# with the generated speech.  NOTE(review): this assumes the Qwen2.5-Omni
# talker also emits 24 kHz audio — confirm against the model card.
TARGET_SR = 24000

# Two seconds of silence, used as a separator between question and reply.
# Hoisted out of the loop: it is identical for every file.
silence = np.zeros(TARGET_SR * 2)

# For every selected input clip, generate four independently sampled
# replies and save each as "input + 2 s silence + reply".
for idx, input_file in enumerate(selected_files, start=1):
    print(f"正在处理第 {idx} 个文件: {input_file}")
    # Read the input audio from disk.
    input_audio, sr = sf.read(input_file)
    # Robustness fix: sf.read returns a 2-D (frames, channels) array for
    # multi-channel wavs, which would make the np.concatenate below fail
    # against the 1-D silence/reply arrays — downmix to mono first.
    if input_audio.ndim > 1:
        input_audio = input_audio.mean(axis=1)
    # Resample when the source rate differs from the target rate.
    if sr != TARGET_SR:
        import librosa  # lazy import: only needed when resampling

        input_audio = librosa.resample(input_audio, orig_sr=sr, target_sr=TARGET_SR)
        sr = TARGET_SR
    # Generate four replies for this input.
    for reply_idx in range(1, 5):
        print(f" 生成第 {reply_idx} 次回复...")
        # Build a fresh conversation each time so no state leaks between
        # generations.
        conversation = [
            {
                "role": "system",
                "content": [
                    {"type": "text", "text": "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech."}
                ],
            },
            {
                "role": "user",
                "content": [
                    {"type": "audio", "audio": input_file},
                ],
            },
        ]
        # Prepare the model inputs (templated text + extracted media).
        text = processor.apply_chat_template(
            conversation, add_generation_prompt=True, tokenize=False
        )
        audios, images, videos = process_mm_info(
            conversation, use_audio_in_video=USE_AUDIO_IN_VIDEO
        )
        inputs = processor(
            text=text,
            audio=audios,
            images=images,
            videos=videos,
            return_tensors="pt",
            padding=True,
            use_audio_in_video=USE_AUDIO_IN_VIDEO,
        )
        inputs = inputs.to(model.device).to(model.dtype)
        # Generate text + speech; do_sample=True is required for the
        # temperature setting to take effect.
        text_ids, audio = model.generate(
            **inputs,
            use_audio_in_video=USE_AUDIO_IN_VIDEO,
            temperature=temperature,
            do_sample=True,
        )
        full_text = processor.batch_decode(
            text_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        # Extract the assistant's reply from the decoded transcript.
        generated_text = full_text[0]
        if "assistant" in generated_text:
            model_prediction = generated_text.split("assistant", 1)[1].strip()
        else:
            model_prediction = generated_text.strip()
        print(f" 回复文本: {model_prediction}")
        # Flatten the generated waveform into a 1-D numpy array on the CPU.
        generated_audio = audio.reshape(-1).detach().cpu().numpy()
        # Concatenate: input + 2 s silence + reply.
        combined_audio = np.concatenate([input_audio, silence, generated_audio])
        # Output name pattern: qwen_0001_1.wav (file index, reply index).
        output_filename = f"qwen_{idx:04d}_{reply_idx}.wav"
        output_path = os.path.join(base_output_dir, output_filename)
        # Save the stitched clip.
        sf.write(
            output_path,
            combined_audio,
            samplerate=TARGET_SR,
        )
        print(f" 已保存至 {output_path}")
    print(f"完成第 {idx} 个文件的处理\n")
print("所有文件处理完成!")