|
|
import streamlit as st |
|
|
import uuid |
|
|
from modules.llm import GroqClient, GroqCompletion |
|
|
|
|
|
|
|
|
# Groq credentials come from Streamlit's secrets store (.streamlit/secrets.toml).
api_key = st.secrets["GROQ_API_KEY"]
groq_client = GroqClient(api_key)

# Human-readable model labels mapped to Groq model identifiers.
llm_model = {
    "LLama3-70B": "llama3-70b-8192",
    "LLama3.1-70B": "llama-3.1-70b-versatile",
    "LLama3-70B-Preview": "llama3-groq-70b-8192-tool-use-preview",
}

# Upper bound on tokens generated per completion request.
max_tokens = 8192
|
|
|
|
|
|
|
|
# ---- Page chrome -------------------------------------------------------
st.set_page_config(page_title="Simple Chatbot", layout="centered")
st.markdown("<h2 style='text-align: center;'>Simple Chatbot</h2>", unsafe_allow_html=True)
st.markdown("---")

# ---- Model and sampling controls, side by side -------------------------
model_col, temp_col = st.columns(2)
with model_col:
    selected_llm_model = st.selectbox("Select LLM Model", options=list(llm_model.keys()), index=0)
with temp_col:
    temperature = st.slider("Select Temperature", min_value=0.0, max_value=1.0, value=0.7, step=0.1)

# ---- Per-session conversation state ------------------------------------
# A fresh browser session gets a unique id and an empty chat transcript;
# both survive Streamlit's top-to-bottom reruns via session_state.
if 'session_id' not in st.session_state:
    st.session_state['session_id'] = str(uuid.uuid4())
    st.session_state['history'] = []

# ---- User prompt entry -------------------------------------------------
q_input = st.text_input("Enter your question here:", placeholder="Type your question...")
|
|
|
|
|
|
|
|
if q_input:
    # Guard against Streamlit reruns: every widget interaction re-executes
    # this script top to bottom, and a bare `if q_input:` would re-submit
    # the same question to the Groq API and append duplicate history
    # entries on each rerun. Process each distinct question exactly once.
    if st.session_state.get('last_processed') != q_input:
        st.session_state['last_processed'] = q_input
        st.session_state['history'].append({"role": "user", "content": q_input})

        # Resolve the human-readable label to the Groq model identifier.
        LLMmodel = llm_model[selected_llm_model]
        prompt_template = q_input

        # The spinner must wrap the network call itself: previously the
        # completion ran *before* `st.spinner`, so the UI gave no feedback
        # during the slow part (the API round-trip).
        with st.spinner("Processing..."):
            groq_completion = GroqCompletion(
                groq_client, LLMmodel, "General", prompt_template, temperature=temperature, max_tokens=max_tokens
            )
            result = groq_completion.create_completion()
            # Assumes an OpenAI-style response payload — TODO confirm
            # against GroqCompletion.create_completion's actual return.
            response = result["choices"][0]["message"]["content"]

        st.session_state['history'].append({"role": "assistant", "content": response})
|
|
|
|
|
|
|
|
# Render the full conversation transcript, oldest message first.
for msg in st.session_state['history']:
    speaker = "User" if msg["role"] == "user" else "Assistant"
    st.markdown(f"**{speaker}**: {msg['content']}")
|
|
|