import streamlit as st
import edge_tts
import asyncio
import tempfile
import os
import random
from huggingface_hub import InferenceClient
from streaming_stt_nemo import Model
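# Preload the English speech-to-text engine once at import time so every
# transcription call reuses the same model instance.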
default_lang = "en"
engines = {default_lang: Model(default_lang)}
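# Convert an uploaded audio file to text with the preloaded NeMo STT model.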
def transcribe(audio):
    lang = "en"
    model = engines[lang]
    text = model.stt_file(audio)[0]
    return text
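# Hugging Face API token, read from the environment; required for gated
# models such as Meta-Llama-3.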
HF_TOKEN = os.environ.get("HF_TOKEN", None)
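# Pick the Inference API client that matches the model label shown in the UI.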
def client_fn(model):
    if "Mixtral" in model:
        return InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1", token=HF_TOKEN)
    elif "Llama" in model:
        return InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=HF_TOKEN)
    elif "Mistral" in model:
        return InferenceClient("mistralai/Mistral-7B-Instruct-v0.3", token=HF_TOKEN)
    elif "Phi" in model:
        return InferenceClient("microsoft/Phi-3-mini-4k-instruct", token=HF_TOKEN)
    else:
        return InferenceClient("microsoft/Phi-3-mini-4k-instruct", token=HF_TOKEN)
def randomize_seed_fn(seed: int) -> int:
    # The incoming seed is ignored; a fresh random seed is drawn on every
    # call so repeated prompts produce varied generations.
    return random.randint(0, 999999)
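# Prompt template: system instructions precede the user message; models()
# appends a closing [JARVIS] tag to cue the assistant's reply.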
system_instructions1 = """
[SYSTEM] Answer as JARVIS, the AI assistant made by Tony Stark.
Keep the conversation friendly, short, clear, and concise.
Avoid unnecessary introductions and answer the user's questions directly.
Respond in a normal, conversational manner while being friendly and helpful.
[USER]
"""
def models(text, model="Mixtral 8x7B", seed=42):
    seed = int(randomize_seed_fn(seed))
    client = client_fn(model)
    generate_kwargs = dict(
        max_new_tokens=300,
        seed=seed,
    )
    formatted_prompt = system_instructions1 + text + "[JARVIS]"
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        # Accumulate streamed tokens, skipping the end-of-sequence marker.
        if response.token.text != "</s>":
            output += response.token.text
    return output
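# Full voice pipeline: transcribe the audio, generate a reply, then
# synthesize speech with edge-tts and return the audio file path.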
async def respond(audio, model, seed):
    user = transcribe(audio)
    reply = models(user, model, seed)
    communicate = edge_tts.Communicate(reply)
    # edge-tts emits MP3 audio by default, so save to an .mp3 file.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
        tmp_path = tmp_file.name
    await communicate.save(tmp_path)
    return tmp_path
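# Streamlit UI: a voice form (audio upload) and a text-only form, both
# backed by the same model helpers.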
| st.title("JARVIS⚡") | |
| st.markdown("### A personal Assistant of Tony Stark for YOU") | |
| st.markdown("### Voice Chat with your personal Assistant") | |
| with st.form("voice_form"): | |
| model_choice = st.selectbox("Choose a model", ['Mixtral 8x7B', 'Llama 3 8B', 'Mistral 7B v0.3', 'Phi 3 mini'], key="voice_model") | |
| audio_file = st.file_uploader("Upload Audio", type=["wav", "mp3"], key="voice_audio") | |
| submit_button = st.form_submit_button("Submit") | |
| if submit_button: | |
| if audio_file is not None: | |
| with st.spinner("Transcribing and generating response..."): | |
| audio_bytes = audio_file.read() | |
| with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file: | |
| tmp_file.write(audio_bytes) | |
| tmp_path = tmp_file.name | |
| response = respond(tmp_path, model_choice, 42) | |
| st.audio(response, format='audio/wav') | |
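# Text-only chat form.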
| with st.form("text_form"): | |
| model_choice = st.selectbox("Choose a model", ['Mixtral 8x7B', 'Llama 3 8B', 'Mistral 7B v0.3', 'Phi 3 mini'], key="text_model") | |
| user_text = st.text_area("Enter your message:", key="text_input") | |
| submit_button = st.form_submit_button("Submit") | |
| if submit_button: | |
| if user_text: | |
| with st.spinner("Generating response..."): | |
| response = models(user_text, model_choice, 42) | |
| st.text_area("JARVIS Response", value=response, key="text_output", height=150) |