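# Requires a transformers version with Qwen3-Omni support and the
# qwen-omni-utils helper package; both are assumed to be installed.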
import soundfile as sf
from transformers import Qwen3OmniMoeForConditionalGeneration, Qwen3OmniMoeProcessor
from qwen_omni_utils import process_mm_info

MODEL_PATH = "Qwen/Qwen3-Omni-30B-A3B-Instruct"
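
# Load the model, letting Accelerate shard it across available devices.
# FlashAttention 2 is optional; drop attn_implementation if flash-attn
# is not installed.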
print("Loading model...") |
|
|
model = AutoModelForCausalLM.from_pretrained( |
|
|
MODEL_PATH, |
|
|
dtype="auto", |
|
|
device_map="auto", |
|
|
attn_implementation="flash_attention_2", |
|
|
trust_remote_code=True |
|
|
) |
|
|
|
|
|
|
|
|
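
# The matching processor bundles the tokenizer with the image, audio,
# and video preprocessors.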
print("Loading processor...") |
|
|
processor = AutoProcessor.from_pretrained( |
|
|
MODEL_PATH, |
|
|
trust_remote_code=True |
|
|
) |
|
|
|
|
|
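
# A single-turn conversation mixing an image, an audio clip, and a text
# instruction; process_mm_info below resolves the remote URLs.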
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-Omni/demo/cars.jpg"},
            {"type": "audio", "audio": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-Omni/demo/cough.wav"},
            {"type": "text", "text": "What can you see and hear? Answer in one short sentence."},
        ],
    },
]
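
# Whether to use the audio track of video inputs. The same flag must be
# passed to process_mm_info, the processor, and generate.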
USE_AUDIO_IN_VIDEO = True
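
# Render the chat template to a prompt string, load the media, and pack
# everything into model-ready tensors.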
print("Processing inputs...") |
|
|
text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False) |
|
|
|
|
|
|
|
|
audios, images, videos = process_mm_info(conversation, use_audio_in_video=USE_AUDIO_IN_VIDEO) |
|
|
|
|
|
inputs = processor( |
|
|
text=text, |
|
|
audio=audios, |
|
|
images=images, |
|
|
videos=videos, |
|
|
return_tensors="pt", |
|
|
padding=True, |
|
|
use_audio_in_video=USE_AUDIO_IN_VIDEO |
|
|
) |
|
|
|
|
|
inputs = inputs.to(model.device).to(model.dtype) |
|
|
|
|
|
|
|
|
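
# generate returns the text token ids and, when the talker is enabled, a
# waveform tensor for the spoken reply. speaker picks the output voice;
# thinker_return_dict_in_generate=True returns a generate output whose
# .sequences field is sliced below.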
print("Generating...") |
|
|
text_ids, audio = model.generate( |
|
|
**inputs, |
|
|
speaker="Ethan", |
|
|
thinker_return_dict_in_generate=True, |
|
|
use_audio_in_video=USE_AUDIO_IN_VIDEO |
|
|
) |
|
|
|
|
|
|
|
|
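
# Strip the prompt tokens so only the newly generated text is decoded.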
output_text = processor.batch_decode(
    text_ids.sequences[:, inputs["input_ids"].shape[1] :],
    skip_special_tokens=True,
    clean_up_tokenization_spaces=False,
)
print(f"Output Text: {output_text}")
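
# The talker produces 24 kHz mono audio; flatten it and save as WAV.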
if audio is not None:
    print("Saving audio to output.wav...")
    sf.write(
        "output.wav",
        audio.reshape(-1).detach().cpu().numpy(),
        samplerate=24000,
    )

print("Done.")