# qwen_test.py — Qwen2.5-Omni batch audio-understanding evaluation script.
# (Hub-page residue converted to comments: originally "a800/sh/qwen_test.py",
#  uploaded by sgshdgdhsdg, commit 978215a verified)
import os
import json
import soundfile as sf
from transformers import Qwen2_5OmniForConditionalGeneration, Qwen2_5OmniProcessor
from qwen_omni_utils import process_mm_info
# --- Model / processor initialization -------------------------------------
MODEL_PATH = "/data/liyangzhuo/models/Qwen2.5-Omni-7B"
model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
    MODEL_PATH,
    torch_dtype="auto",
    device_map="auto",
)
processor = Qwen2_5OmniProcessor.from_pretrained(MODEL_PATH)

# --- Input / output locations ---------------------------------------------
input_audio_dir = "/data/liyangzhuo/wavbench_volume/speed"
output_dir = "/data/liyangzhuo/wavbench_volume/result"
json_file_path = "/data/liyangzhuo/wavbench_volume/speed/explicit_understanding_speed.json"

# This task is audio-only, so video soundtracks are never used.
USE_AUDIO_IN_VIDEO = False

# One generation pass per entry: four samples, all at temperature 0.8,
# relying on sampling for response diversity.
TEMPERATURES = [0.8] * 4

os.makedirs(output_dir, exist_ok=True)

# Load the task metadata and index it by sample id for O(1) lookup.
with open(json_file_path, 'r', encoding='utf-8') as f:
    task_metadata = json.load(f)
data_map = {entry['id']: entry for entry in task_metadata}

# Every .wav file in the input directory, sorted by file name.
input_files = sorted(
    os.path.join(input_audio_dir, name)
    for name in os.listdir(input_audio_dir)
    if name.endswith('.wav')
)

# Accumulators for per-file results and the run summary.
results = []
print("开始处理Qwen音频对话任务...")
print(f"输入目录: {input_audio_dir}")
print(f"输入文件数量: {len(input_files)}")
print(f"输出目录: {output_dir}")
print(f"JSON文件: {json_file_path}")
print("=" * 70)
success_count = 0
error_count = 0
# Main processing loop: for each .wav file, build a single-turn audio
# conversation, run the model len(TEMPERATURES) times (sampling enabled so
# temperature takes effect), and collect every response for that file.
# The whole per-file pipeline sits inside one try/except so a single bad
# file (unreadable audio, decode failure, OOM) is counted and skipped
# without aborting the batch.
for i, input_file in enumerate(input_files, 1):
    try:
        basename = os.path.basename(input_file)
        file_id = os.path.splitext(basename)[0]  # file name without extension; keys into data_map
        print(f"\n🔄 处理第 {i}/{len(input_files)} 个文件: {basename}")
        # Defensive check: the file was listed a moment ago but may have vanished.
        if not os.path.exists(input_file):
            print(f" ❌ 错误: 文件不存在 {input_file}")
            error_count += 1
            continue
        # Read the audio only to validate it and report rate/duration; the
        # model input below is built from the file path, not this array.
        original_audio, original_sr = sf.read(input_file)
        audio_duration = len(original_audio)/original_sr
        print(f" 📁 音频信息: 采样率 {original_sr}Hz, 时长 {audio_duration:.2f}s")
        # Look up the question text and ground-truth label for this file id.
        question_text = ""
        label = ""
        if file_id in data_map:
            question_text = data_map[file_id].get('text', '')
            label = data_map[file_id].get('label', '')
            print(f" 📝 问题: {question_text[:80]}...")
            print(f" 🏷️ 标签: {label}")
        else:
            print(f" ⚠️ 警告: 在JSON文件中未找到 {file_id} 的信息")
        # Single-turn conversation: fixed system prompt + the audio file as
        # the only user content.
        conversation = [
            {
                "role": "system",
                "content": [
                    {"type": "text", "text": "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech."}
                ],
            },
            {
                "role": "user",
                "content": [
                    {"type": "audio", "audio": input_file},
                ],
            },
        ]
        # Build model inputs: chat-templated prompt text plus the extracted
        # multimodal payloads, moved to the model's device/dtype.
        text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
        audios, images, videos = process_mm_info(conversation, use_audio_in_video=USE_AUDIO_IN_VIDEO)
        inputs = processor(text=text, audio=audios, images=images, videos=videos, return_tensors="pt", padding=True, use_audio_in_video=USE_AUDIO_IN_VIDEO)
        inputs = inputs.to(model.device).to(model.dtype)
        # One generation per configured temperature.
        responses = []
        print(" 🤖 正在生成4次回复(不同temperature)...")
        for temp_idx, temperature in enumerate(TEMPERATURES, 1):
            print(f" 📝 生成第 {temp_idx}/4 次 (temperature={temperature})...")
            # Text-only generation: speech synthesis disabled via return_audio=False.
            text_ids = model.generate(
                **inputs,
                use_audio_in_video=USE_AUDIO_IN_VIDEO,
                return_audio=False,
                temperature=temperature,
                do_sample=True,  # sampling must be enabled for temperature to have any effect
                max_new_tokens=512  # cap generated length
            )
            full_text = processor.batch_decode(
                text_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
            )
            # The decoded ids include the prompt; keep only the text after the
            # first "assistant" marker emitted by the chat template.
            generated_text = full_text[0]
            if "assistant" in generated_text:
                model_response = generated_text.split("assistant", 1)[1].strip()
            else:
                model_response = generated_text.strip()
            responses.append({
                "temperature": temperature,
                "response": model_response
            })
            print(f" 回复预览: {model_response[:80]}...")
        print(f" ✅ 完成4次文本生成")
        # Accumulate the per-file record for the final JSON dump.
        result_entry = {
            "id": file_id,
            "input_file": basename,
            "question": question_text,
            "label": label,
            "audio_duration_seconds": round(audio_duration, 2),
            "responses": responses  # one entry per temperature run
        }
        results.append(result_entry)
        success_count += 1
    except Exception as e:
        # Broad catch is deliberate batch behavior: log, count, move on.
        print(f" ❌ 处理失败: {e}")
        error_count += 1
        continue
# Persist every collected record as a single pretty-printed JSON file,
# then report the run summary to stdout.
result_json_path = os.path.join(output_dir, "speed_results.json")
with open(result_json_path, 'w', encoding='utf-8') as out_fp:
    out_fp.write(json.dumps(results, ensure_ascii=False, indent=2))

separator = "=" * 70
print("\n" + separator)
print("📊 处理完成统计:")
print(f" ✅ 成功处理: {success_count} 个文件")
print(f" ❌ 处理失败: {error_count} 个文件")
print(f" 📁 输出位置: {output_dir}")
if success_count > 0:
    print(f" 📋 结果JSON: {result_json_path}")
print(separator)
print("🎉 任务完成!")