import os
from typing import Generator

import streamlit as st
from dotenv import find_dotenv, load_dotenv
from groq import Groq

# Load environment variables (expects GROQ_API_KEY in a .env file)
_ = load_dotenv(find_dotenv())

st.set_page_config(page_icon="🤖", layout="wide", page_title="Groq & LLaMA3.1 Chat Bot...")
|
|
|
|
def icon(emoji: str):
    """Shows an emoji as a Notion-style page icon."""
    st.write(
        f'<span style="font-size: 78px; line-height: 1">{emoji}</span>',
        unsafe_allow_html=True,
    )
|
|
|
|
# Render the page icon and title
icon("🤖")
st.subheader("Groq Chat with LLaMA3.1 App", divider="rainbow", anchor=False)
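# Guard (an addition, not in the original script): fail fast with a readable
# message if the API key is missing, instead of a KeyError at client construction.
if "GROQ_API_KEY" not in os.environ:
    st.error("GROQ_API_KEY is not set. Add it to your .env file.", icon="❌")
    st.stop()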
|
|
# Initialize the Groq client with the API key from the environment
client = Groq(
    api_key=os.environ["GROQ_API_KEY"],
)
|
|
# Initialize chat history in session state
if "messages" not in st.session_state:
    st.session_state.messages = []
|
|
| if "selected_model" not in st.session_state: |
| st.session_state.selected_model = None |
|
|
# Model details: display name, max output tokens, and developer
models = {
    "llama-3.1-70b-versatile": {"name": "LLaMA3.1-70b", "tokens": 4096, "developer": "Meta"},
    "llama-3.1-8b-instant": {"name": "LLaMA3.1-8b", "tokens": 4096, "developer": "Meta"},
    "llama3-70b-8192": {"name": "Meta Llama 3 70B", "tokens": 4096, "developer": "Meta"},
    "llama3-8b-8192": {"name": "Meta Llama 3 8B", "tokens": 4096, "developer": "Meta"},
    "llama3-groq-70b-8192-tool-use-preview": {"name": "Llama 3 Groq 70B Tool Use (Preview)", "tokens": 4096, "developer": "Groq"},
    "gemma-7b-it": {"name": "Gemma-7b-it", "tokens": 4096, "developer": "Google"},
    "mixtral-8x7b-32768": {"name": "Mixtral-8x7b-Instruct-v0.1", "tokens": 32768, "developer": "Mistral"},
}
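# Note (an addition): Groq retires model ids over time; if a selection starts
# failing, check https://console.groq.com/docs/models for the current list.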
|
|
# Layout: model controls on the left, model info on the right
col1, col2 = st.columns([1, 3])
|
|
|
|
with col1:
    model_option = st.selectbox(
        "Choose a model:",
        options=list(models.keys()),
        format_func=lambda x: models[x]["name"],
        index=0,
    )
    max_tokens_range = models[model_option]["tokens"]
    max_tokens = st.slider(
        "Max Tokens:",
        min_value=512,
        max_value=max_tokens_range,
        value=min(32768, max_tokens_range),  # default to the model's maximum
        step=512,
        help=f"Adjust the maximum number of tokens for the model's response. Max for selected model: {max_tokens_range}",
    )
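# Illustrative use of the second column (an addition; col2 was otherwise
# unused): surface the selected model's metadata next to the controls.
with col2:
    st.caption(
        f"Developer: {models[model_option]['developer']} · "
        f"Max output tokens: {models[model_option]['tokens']}"
    )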
|
|
# Reset chat history if the model selection changes
if st.session_state.selected_model != model_option:
    st.session_state.messages = []
    st.session_state.selected_model = model_option
|
|
if st.button("Clear Chat"):
    st.session_state.messages = []

# Replay the chat history on each rerun
for message in st.session_state.messages:
    avatar = "🤖" if message["role"] == "assistant" else "🧑‍💻"
    with st.chat_message(message["role"], avatar=avatar):
        st.markdown(message["content"])
|
|
|
|
def generate_chat_responses(chat_completion) -> Generator[str, None, None]:
    """Yield chat response content from the Groq API response."""
    for chunk in chat_completion:
        if chunk.choices[0].delta.content:
            yield chunk.choices[0].delta.content
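# For reference, a hedged sketch (an addition, not used by the app): the same
# endpoint without streaming returns the whole reply in one object.
def complete_once(prompt: str, model: str = "llama-3.1-8b-instant") -> str:
    """Fetch a full, non-streamed reply for a single prompt."""
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        stream=False,
    )
    return response.choices[0].message.content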
|
|
|
|
if prompt := st.chat_input("Enter your prompt here..."):
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user", avatar="🧑‍💻"):
        st.markdown(prompt)

    full_response = None
    try:
        chat_completion = client.chat.completions.create(
            model=model_option,
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            max_tokens=max_tokens,
            stream=True,
        )

        # Stream the assistant's reply as it arrives
        with st.chat_message("assistant", avatar="🤖"):
            chat_responses_generator = generate_chat_responses(chat_completion)
            full_response = st.write_stream(chat_responses_generator)
    except Exception as e:
        st.error(e, icon="❌")

    # Append the full response to the chat history; the None check guards
    # against the request having failed before full_response was assigned
    if isinstance(full_response, str):
        st.session_state.messages.append(
            {"role": "assistant", "content": full_response}
        )
    elif full_response is not None:
        # st.write_stream can return a list of rendered objects; join them
        combined_response = "\n".join(str(item) for item in full_response)
        st.session_state.messages.append(
            {"role": "assistant", "content": combined_response}
        )
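
# Hedged sketch (an addition): cap the history kept in session state so long
# conversations stay within the model's context window. MAX_HISTORY is an
# assumed limit, not from the original script; tune as needed.
MAX_HISTORY = 20
if len(st.session_state.messages) > MAX_HISTORY:
    st.session_state.messages = st.session_state.messages[-MAX_HISTORY:]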