# CO-tiny / app.py — minimal Streamlit chatbot front-end for Groq-hosted LLaMA models.
import streamlit as st
import uuid
from modules.llm import GroqClient, GroqCompletion
# Streamlit requires st.set_page_config to be the first Streamlit command
# executed in the script; the original called it after st.secrets access,
# which can raise StreamlitAPIException on some versions. Run it first.
st.set_page_config(page_title="Simple Chatbot", layout="centered")

# Securely access the Groq API key from Streamlit secrets (never hard-code it).
api_key = st.secrets["GROQ_API_KEY"]
groq_client = GroqClient(api_key)

# UI display name -> Groq model identifier.
llm_model = {
    "LLama3-70B": "llama3-70b-8192",
    "LLama3.1-70B": "llama-3.1-70b-versatile",
    "LLama3-70B-Preview": "llama3-groq-70b-8192-tool-use-preview",
}
max_tokens = 8192  # Default completion budget for simplicity.

# Page header.
st.markdown("<h2 style='text-align: center;'>Simple Chatbot</h2>", unsafe_allow_html=True)
st.markdown("---")

# Top-bar UI: model picker and sampling-temperature slider side by side.
col1, col2 = st.columns(2)
with col1:
    selected_llm_model = st.selectbox("Select LLM Model", options=list(llm_model.keys()), index=0)
with col2:
    temperature = st.slider("Select Temperature", min_value=0.0, max_value=1.0, value=0.7, step=0.1)

# Per-browser-session state: unique id plus the chat transcript.
if 'session_id' not in st.session_state:
    st.session_state['session_id'] = str(uuid.uuid4())
    st.session_state['history'] = []
# ---- User input ----
q_input = st.text_input("Enter your question here:", placeholder="Type your question...")

# Process the question and generate a response.
#
# Guard against Streamlit reruns: st.text_input keeps its value across
# reruns, so without the `last_input` check the same question would be
# re-sent to the API (and duplicated in history) on every widget
# interaction, e.g. moving the temperature slider.
if q_input and st.session_state.get('last_input') != q_input:
    st.session_state['last_input'] = q_input
    st.session_state['history'].append({"role": "user", "content": q_input})

    LLMmodel = llm_model[selected_llm_model]
    prompt_template = q_input

    # Run the blocking completion call INSIDE the spinner so the user sees
    # feedback while the model works. The original entered the spinner only
    # after create_completion() had already returned, so it never showed
    # during the slow part.
    with st.spinner("Processing..."):
        groq_completion = GroqCompletion(
            groq_client, LLMmodel, "General", prompt_template, temperature=temperature, max_tokens=max_tokens
        )
        result = groq_completion.create_completion()
        # Response payload follows the OpenAI-style chat-completion shape.
        response = result["choices"][0]["message"]["content"]

    st.session_state['history'].append({"role": "assistant", "content": response})
# Render the running chat transcript, oldest message first.
for msg in st.session_state['history']:
    speaker = "User" if msg["role"] == "user" else "Assistant"
    st.markdown(f"**{speaker}**: {msg['content']}")