import torch
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import gradio as gr
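# Dependencies (assumed setup): pip install torch transformers accelerate gradio
# (accelerate backs device_map="auto"; ffmpeg is needed to decode recorded audio)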
# 1️⃣ Load Whisper model for speech-to-text
asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-small")
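# Note: the pipeline runs on CPU by default; optionally pass device=0 for GPU,
# and chunk_length_s=30 to transcribe recordings longer than 30 seconds.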
# 2️⃣ Load Qwen-style LLM for text responses
model_name = "Qwen/Qwen1.5-0.5B-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
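# float16 roughly halves GPU memory use; on CPU we keep float32, since
# half precision is slow or unsupported for many CPU kernels.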
# Bot reply generator
def generate_response(user_text):
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": user_text}
    ]
    # Format with the chat template and append the assistant prompt so the
    # model continues as the assistant rather than echoing the conversation
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    with torch.no_grad():
        output = model.generate(input_ids, max_new_tokens=150, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens, i.e. the assistant's reply
    response = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True).strip()
    return response
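# generate() decodes greedily here; for more varied replies you could pass
# do_sample=True with temperature/top_p to model.generate().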
# Complete pipeline: Audio → Text → Response
def audio_to_bot_response(audio_path):
    print("[INFO] Transcribing audio...")
    result = asr_pipe(audio_path)
    user_text = result["text"]
    print(f"[INFO] Transcribed: {user_text}")
    response = generate_response(user_text)
    return f"🎤 You said: {user_text}\n🤖 Bot: {response}"
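# Note: Gradio may pass audio_path=None if the user submits without recording;
# a hardened version would return an error message before calling the ASR pipeline.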
interface = gr.Interface(
    fn=audio_to_bot_response,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="text",
    title="🎙️ Voice to AI Bot Response",
    description="Speak into the mic. The AI will transcribe and respond.",
)
interface.launch()
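# launch() serves the app locally (http://127.0.0.1:7860 by default);
# pass share=True for a temporary public Gradio link.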