| import os |
| import json |
| import soundfile as sf |
| from transformers import Qwen2_5OmniForConditionalGeneration, Qwen2_5OmniProcessor |
| from qwen_omni_utils import process_mm_info |
|
|
| |
# Load the Qwen2.5-Omni multimodal model and its matching processor from a
# local checkpoint.  The checkpoint path was previously duplicated verbatim
# in both from_pretrained() calls; it is hoisted into a single constant so
# the two can never drift apart.
MODEL_PATH = "/data/liyangzhuo/models/Qwen2.5-Omni-7B"

model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
    MODEL_PATH,
    torch_dtype="auto",   # take dtype from the checkpoint config
    device_map="auto",    # place/shard across available devices
)
processor = Qwen2_5OmniProcessor.from_pretrained(MODEL_PATH)
|
|
| |
# --- Run configuration -----------------------------------------------------
# Input audio directory, output directory, and the metadata JSON that maps
# each sample id to its question text and label.
input_audio_dir = "/data/liyangzhuo/wavbench_volume/speed"
output_dir = "/data/liyangzhuo/wavbench_volume/result"
json_file_path = "/data/liyangzhuo/wavbench_volume/speed/explicit_understanding_speed.json"

# Ignore any audio track embedded in video inputs.
USE_AUDIO_IN_VIDEO = False

# One generation pass per list entry; all four passes use temperature 0.8.
TEMPERATURES = [0.8] * 4

# Make sure the result directory exists before any writes.
os.makedirs(output_dir, exist_ok=True)
|
|
|
|
| |
# Load the question/label metadata and index it by sample id for O(1) lookup
# while iterating the audio files.
with open(json_file_path, 'r', encoding='utf-8') as f:
    volume_data = json.load(f)

data_map = {item['id']: item for item in volume_data}

# Collect all .wav files in deterministic (sorted) order.
# Idiom fix: sorted() over a generator expression replaces the original
# manual append loop followed by a separate .sort() call.
input_files = sorted(
    os.path.join(input_audio_dir, filename)
    for filename in os.listdir(input_audio_dir)
    if filename.endswith('.wav')
)
|
|
| |
# Accumulators for the batch run: collected result entries plus
# success/failure counters.
results = []
success_count = 0
error_count = 0

# Print the run banner (same lines/bytes as before, emitted via one loop).
header_lines = (
    "开始处理Qwen音频对话任务...",
    f"输入目录: {input_audio_dir}",
    f"输入文件数量: {len(input_files)}",
    f"输出目录: {output_dir}",
    f"JSON文件: {json_file_path}",
    "=" * 70,
)
for line in header_lines:
    print(line)
|
|
| |
# Main per-file loop: read each .wav, look up its question/label, build a
# chat prompt with the audio attached, and sample one text response per
# configured temperature.  Any single-file failure is logged and skipped so
# the rest of the batch keeps running.
num_runs = len(TEMPERATURES)  # was hard-coded as "4" in the progress messages

for i, input_file in enumerate(input_files, 1):
    try:
        basename = os.path.basename(input_file)
        file_id = os.path.splitext(basename)[0]  # metadata key = filename stem

        print(f"\n🔄 处理第 {i}/{len(input_files)} 个文件: {basename}")

        if not os.path.exists(input_file):
            print(f" ❌ 错误: 文件不存在 {input_file}")
            error_count += 1
            continue

        # Read the audio only to report sample rate / duration; the model
        # input itself is built from the file path by process_mm_info below.
        original_audio, original_sr = sf.read(input_file)
        audio_duration = len(original_audio) / original_sr
        print(f" 📁 音频信息: 采样率 {original_sr}Hz, 时长 {audio_duration:.2f}s")

        # Look up the question text and label for this sample, if present.
        question_text = ""
        label = ""
        if file_id in data_map:
            question_text = data_map[file_id].get('text', '')
            label = data_map[file_id].get('label', '')
            print(f" 📝 问题: {question_text[:80]}...")
            print(f" 🏷️ 标签: {label}")
        else:
            print(f" ⚠️ 警告: 在JSON文件中未找到 {file_id} 的信息")

        # Chat-template conversation: fixed system prompt + the audio file.
        conversation = [
            {
                "role": "system",
                "content": [
                    {"type": "text", "text": "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech."}
                ],
            },
            {
                "role": "user",
                "content": [
                    {"type": "audio", "audio": input_file},
                ],
            },
        ]

        # Preprocess once per file; the same encoded inputs are reused for
        # every temperature so tokenization is not repeated inside the loop.
        text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
        audios, images, videos = process_mm_info(conversation, use_audio_in_video=USE_AUDIO_IN_VIDEO)
        inputs = processor(text=text, audio=audios, images=images, videos=videos, return_tensors="pt", padding=True, use_audio_in_video=USE_AUDIO_IN_VIDEO)
        inputs = inputs.to(model.device).to(model.dtype)

        responses = []

        print(f" 🤖 正在生成{num_runs}次回复(不同temperature)...")
        for temp_idx, temperature in enumerate(TEMPERATURES, 1):
            print(f" 📝 生成第 {temp_idx}/{num_runs} 次 (temperature={temperature})...")

            text_ids = model.generate(
                **inputs,
                use_audio_in_video=USE_AUDIO_IN_VIDEO,
                return_audio=False,       # text output only, no speech synthesis
                temperature=temperature,
                do_sample=True,           # sampling required for temperature to matter
                max_new_tokens=512
            )

            full_text = processor.batch_decode(
                text_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
            )

            # The decoded string contains the whole chat transcript; keep only
            # the part after the first "assistant" marker.
            # NOTE(review): this split is fragile if the prompt or reply itself
            # contains the word "assistant" — confirm this is acceptable.
            generated_text = full_text[0]
            if "assistant" in generated_text:
                model_response = generated_text.split("assistant", 1)[1].strip()
            else:
                model_response = generated_text.strip()

            responses.append({
                "temperature": temperature,
                "response": model_response
            })

            print(f" 回复预览: {model_response[:80]}...")

        print(f" ✅ 完成{num_runs}次文本生成")

        # One result record per file, carrying all sampled responses.
        result_entry = {
            "id": file_id,
            "input_file": basename,
            "question": question_text,
            "label": label,
            "audio_duration_seconds": round(audio_duration, 2),
            "responses": responses
        }
        results.append(result_entry)

        success_count += 1

    except Exception as e:
        # Best-effort batch processing: log the failure and move on.
        print(f" ❌ 处理失败: {e}")
        error_count += 1
        continue
|
|
| |
# Persist every collected result entry as a single pretty-printed JSON file.
result_json_path = os.path.join(output_dir, "speed_results.json")
with open(result_json_path, 'w', encoding='utf-8') as out_fh:
    json.dump(results, out_fh, ensure_ascii=False, indent=2)

# Final summary banner (same output bytes as before).
separator = "=" * 70
print("\n" + separator)
print("📊 处理完成统计:")
print(f" ✅ 成功处理: {success_count} 个文件")
print(f" ❌ 处理失败: {error_count} 个文件")
print(f" 📁 输出位置: {output_dir}")
if success_count > 0:
    print(f" 📋 结果JSON: {result_json_path}")
print(separator)
print("🎉 任务完成!")