# Voice chat demo: transcribe microphone audio with Whisper, generate a reply
# with a small chat LLM, and serve everything through a Gradio interface.

import torch
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Speech-to-text: Whisper small via the Hugging Face ASR pipeline.
asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-small")
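# The ASR pipeline runs on the CPU by default; if a GPU is available, you can
# pass the standard `device` pipeline argument to speed up transcription:
# asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-small", device=0)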

# Text generation: Qwen1.5-0.5B-Chat, a small chat model that fits on modest hardware.
model_name = "Qwen/Qwen1.5-0.5B-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)


def generate_response(user_text):
    """Generate a chat reply for the transcribed user text."""
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": user_text},
    ]

    # Build the prompt with the model's chat template; add_generation_prompt=True
    # appends the assistant header so the model continues as the assistant.
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    with torch.no_grad():
        output = model.generate(input_ids, max_new_tokens=150, pad_token_id=tokenizer.eos_token_id)

    # Decode only the newly generated tokens. Slicing off the prompt is more
    # robust than splitting the decoded string on "assistant", which also
    # stripped every colon out of the reply.
    response = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return response.strip()
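# Text-only sanity check (hypothetical input; uncomment to exercise the LLM
# path without recording any audio):
# print(generate_response("What is the capital of France?"))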


def audio_to_bot_response(audio_path):
    """Transcribe the recorded audio, then generate and format the bot's reply."""
    print("[INFO] Transcribing audio...")
    result = asr_pipe(audio_path)
    user_text = result["text"]
    print(f"[INFO] Transcribed: {user_text}")

    response = generate_response(user_text)
    return f"🎤 You said: {user_text}\n🤖 Bot: {response}"
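# The handler can also be called directly with a pre-recorded file
# ("sample.wav" below is a hypothetical path):
# print(audio_to_bot_response("sample.wav"))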


# Gradio UI: record from the microphone, show the transcript and reply as text.
interface = gr.Interface(
    fn=audio_to_bot_response,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="text",
    title="🎙️ Voice to AI Bot Response",
    description="Speak into the mic. The AI will transcribe and respond.",
)

interface.launch()
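# Pass share=True to interface.launch() to have Gradio create a temporary
# public URL for the demo.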