import os
import time  # NOTE(review): no longer used after removing the fake startup delay; kept per file-import policy

import matplotlib.pyplot as plt  # NOTE(review): imported but never used in this file — confirm before removing
import streamlit as st
from groq import Groq

# SECURITY: this API key was committed in plain text. It should be rotated.
# Prefer the GROQ_API_KEY environment variable; the original literal remains
# only as a backward-compatible fallback so existing deployments keep working.
API_KEY = os.getenv(
    "GROQ_API_KEY",
    "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",
)


# Function to interact with the model
def get_model_response(prompt):
    """Send *prompt* to the Groq chat completion API and return the reply.

    Parameters
    ----------
    prompt : str
        The user's question, forwarded verbatim as a single user message.

    Returns
    -------
    str
        The model's message content on success, or a human-readable
        "An error occurred: ..." string on failure. Callers display
        either result directly, so this function never raises.
    """
    try:
        client = Groq(api_key=API_KEY)
        chat_completion = client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            model="llama3-8b-8192",  # Update with your desired model
        )
        return chat_completion.choices[0].message.content
    except Exception as e:
        # Broad catch is deliberate: any network/auth/API failure is
        # surfaced to the UI as text instead of crashing the app.
        return f"An error occurred: {str(e)}"


# Streamlit App
st.set_page_config(
    page_title="AI Chat Interface",
    page_icon='https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRDtq0iX3NP1eu6DiEnnmm1moWENvPHYNsMZQ&s',
    layout="wide",
)  # Removed semicolon

# Add a header with logo (optional)
st.markdown("""
Logo

AI Chat Interface

""", unsafe_allow_html=True)

# NOTE(review): a `with st.spinner("Thinking..."): time.sleep(2)` block ran
# here on every rerun, adding a 2-second artificial delay with no real work.
# Removed as a defect — the real spinner below covers the actual API call.

# User input with placeholder and help text
user_input = st.text_input(
    "Your Prompt",
    placeholder="Type your question here...",
    help="Feel free to ask anything!",
)

# Display "Thinking..." animation while fetching response
if st.button("Get Response"):
    with st.spinner("Fetching Response..."):
        response = get_model_response(user_input)
        st.write(f" {response}")