Spaces:
Sleeping
Sleeping
File size: 3,725 Bytes
86b946a 9030e5b bfaeb19 a8498c0 bfaeb19 a6532a3 be3e24b a6532a3 6d8c079 fdc0420 1559379 512492d a6532a3 b453ff2 a6532a3 512492d 46cf1cd dc92339 512492d a6532a3 b453ff2 a6532a3 e20f46f a6532a3 b453ff2 a6532a3 cab6a89 512492d a6532a3 512492d 9030e5b cab6a89 512492d b453ff2 512492d b453ff2 512492d b453ff2 512492d d06f4eb 512492d f845a0a 512492d d955865 a197dc7 883b37e 512492d b453ff2 512492d d955865 e0d541d 512492d b453ff2 3405778 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 |
import streamlit as st
from gradio_client import Client
from st_audiorec import st_audiorec
from gtts import gTTS
import os
# Constants
# App title — stylized Unicode lettering; appears garbled here due to a
# source-encoding issue. NOTE(review): confirm the intended glyphs
# (presumably a decorative spelling of "FlawlessAI").
TITLE = "๐๐น๐ฎ๐๐น๐ฒ๐๐๐๐"
# Intentionally empty; rendered under the title via st.write() below.
DESCRIPTION = """"""
# Sidebar: generation hyper-parameter controls.
# NOTE(review): as written, these slider values are never forwarded to
# predict() (the call site uses predict's defaults), so moving the sliders
# has no effect — confirm and wire them into the predict() call.
with st.sidebar:
    # Sampling temperature: higher values produce more random output.
    temperatureSide = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.9, step=0.05)
    # Maximum number of new tokens to generate (float because st.slider
    # with float bounds yields floats).
    max_new_tokensSide = st.slider("Max Tokens", min_value=0.0, max_value=4096.0, value=512.0, step=64.0)
    # Nucleus-sampling probability mass cutoff.
    ToppSide = st.slider("Top-P (Nucleus Sampling)", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
    # Penalty > 1.0 discourages the model from repeating itself.
    RepetitionpenaltySide = st.slider("Repetition Penalty", min_value=0.0, max_value=2.0, value=1.2, step=0.05)
# Function to convert text to speech using gTTS
def text_to_speech(text, lang='en', filename="response.mp3"):
    """Render *text* to an MP3 file via Google Text-to-Speech and return its path.

    Args:
        text: The text to synthesize.
        lang: ISO language code for the voice (default English).
        filename: Output path for the MP3. Defaults to the historical
            hard-coded "response.mp3" so existing callers are unaffected;
            passing a distinct name avoids clobbering a previous response.

    Returns:
        The path of the MP3 file that was written.
    """
    tts = gTTS(text=text, lang=lang, slow=False)
    tts.save(filename)
    return filename
# Prediction function
def predict(message, system_prompt='Your name is FlawlessAI. You are developed by Achyuth. Your developer is a 14-year old boy, who is interested in AI and Machine Learning. Dont talk about your developers age, until the user asks for your developers age. You need to give the user a long and big text as output with useful information. When the user asks you for information, never exclaim the words, and directly come to point and answer to the user. You need to provide short text-answers, meaningful answers and directly to the point. Never miscalculate the calculations. You need to check and verify the answer and then provide to user, the correct answer. You need to provide accurate and correct information. If the user conveys with you in any language, you also should convey in the same language',
            temperature=0.7, max_new_tokens=2048, Topp=0.5, Repetitionpenalty=1.2):
    """Send *message* to the hosted Llama-2-13B chat Space and return its reply.

    Args:
        message: The user's prompt text.
        system_prompt: Persona / behavior instructions sent with every request.
        temperature: Sampling temperature for generation.
        max_new_tokens: Cap on the number of generated tokens.
        Topp: Nucleus-sampling probability threshold.
        Repetitionpenalty: Penalty applied to repeated tokens.

    Returns:
        The text response produced by the remote /chat endpoint.
    """
    with st.status("sแดแดสแดษชษดษข แดสษชแดษดแด..."):
        # Connect to the public Gradio Space hosting the model.
        client = Client("https://huggingface-projects-llama-2-13b-chat.hf.space/")
        st.write("Requesting Flawless-LLM...")
    with st.status("Processing response..."):
        # Positional argument order is dictated by the remote /chat API.
        response = client.predict(
            message,
            system_prompt,
            max_new_tokens,
            temperature,
            Topp,
            500,  # presumably the top-k slot of the remote API — TODO confirm
            Repetitionpenalty,
            api_name="/chat"
        )
        # Bug fix: the original literal contained a raw line break inside the
        # quotes (an unterminated string — SyntaxError). Rejoined onto one
        # line. NOTE(review): the trailing character is mojibake of what was
        # presumably a check-mark emoji — confirm intended glyph.
        st.write("Success โ")
    return response
# Streamlit UI: page header plus first-run chat-history setup.
st.title(TITLE)
st.write(DESCRIPTION)
# Ensure the transcript list exists; session_state is dict-like, so
# setdefault only creates it on the first run.
st.session_state.setdefault("messages", [])
# Replay the stored conversation so it persists across Streamlit reruns.
for past in st.session_state.messages:
    role = past["role"]
    icon = "๐งโ๐ป" if role == 'human' else '๐ฆ'
    with st.chat_message(role, avatar=icon):
        st.markdown(past["content"])
# Mic input: raw WAV bytes from the browser recorder, or None until the
# user records something.
wav_audio_data = st_audiorec()
# Chat input: the submitted text, or None when nothing was entered this run.
textinput = st.chat_input("Ask FlawlessAI anything...")
# Handle mic input (raw audio bytes from the recorder widget).
if wav_audio_data is not None:
    # Store a placeholder in the transcript first — the audio itself is not
    # transcribed (a speech-to-text service could be plugged in here later).
    st.session_state.messages.append({"role": "human", "content": "[Voice Message]"})
    # Acknowledge the recording in the chat window.
    st.chat_message("human", avatar="๐").markdown("๐๏ธ Voice message recorded.")
# Handle text input
if prompt := textinput:
    # Echo the user's message and record it in the transcript.
    st.chat_message("human", avatar="๐ฅ").markdown(prompt)
    st.session_state.messages.append({"role": "human", "content": prompt})
    # Bug fix: the sidebar sliders were previously ignored (predict() was
    # called with its defaults). Forward them so the UI controls actually
    # affect generation.
    response = predict(
        message=prompt,
        temperature=temperatureSide,
        max_new_tokens=int(max_new_tokensSide),  # slider yields a float; the API wants a token count
        Topp=ToppSide,
        Repetitionpenalty=RepetitionpenaltySide,
    )
    # Convert the AI response to speech for audio playback.
    speech_file = text_to_speech(response)
    # Display assistant response with inline audio player.
    with st.chat_message("assistant", avatar='๐ฅ'):
        st.markdown(response)
        st.audio(speech_file, format="audio/mp3")
    st.session_state.messages.append({"role": "assistant", "content": response})
|