Engineer786 committed on
Commit
aa8ab73
·
verified ·
1 Parent(s): abe8342

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -22
app.py CHANGED
@@ -1,30 +1,31 @@
1
  import streamlit as st
2
  from groq import Groq
3
  import time
 
4
 
5
  # API Key (replace with yours)
6
  API_KEY = "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"
7
 
8
# Function to interact with the model
def get_model_response(prompt):
    """Ask the Groq chat model for a completion and return the reply text."""
    try:
        groq_client = Groq(api_key=API_KEY)
        completion = groq_client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            model="llama3-8b-8192",  # Update with your desired model
        )
        return completion.choices[0].message.content
    except Exception as e:
        # Surface failures as a string so the UI never crashes.
        return f"An error occurred: {str(e)}"
25
 
26
  # Streamlit App
27
- st.set_page_config(page_title="AI Chat Interface", page_icon="") # Set title and icon
28
 
29
  # Add a header with logo (optional)
30
  st.markdown("""
@@ -36,15 +37,36 @@ st.markdown("""
36
 
37
  # Progress bar animation for loading
38
  with st.spinner("Thinking..."):
39
- time.sleep(2) # Simulate processing time
40
 
41
- user_input = st.text_input("Your Prompt", placeholder="Type your question here...", key="prompt")
 
42
 
43
  # Display "Thinking..." animation while fetching response
44
  if st.button("Get Response"):
45
- with st.spinner("Fetching Response..."):
46
- response = get_model_response(user_input)
47
- st.write(f"**Model Response:** {response}")
48
 
49
  # Success message animation
50
- st.success("Response retrieved successfully!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
  from groq import Groq
3
  import time
4
+ import matplotlib.pyplot as plt
5
 
6
# API Key: prefer the GROQ_API_KEY environment variable.
# SECURITY NOTE(review): the hard-coded fallback below is a credential
# committed to source control — it should be considered leaked. Rotate the
# key and delete the literal; it is kept only for backward compatibility.
import os

API_KEY = os.environ.get("GROQ_API_KEY", "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")
8
 
9
# Function to interact with the model
def get_model_response(prompt):
    """Send *prompt* to the Groq chat API and return the reply text.

    Parameters:
        prompt (str): the user's question.

    Returns:
        str: the model's reply, or an "An error occurred: ..." string —
        failures are reported as text rather than raised, so the Streamlit
        UI always has something to display.
    """
    # Guard: don't spend an API call on a blank prompt.
    if not prompt or not prompt.strip():
        return "An error occurred: prompt is empty"
    try:
        client = Groq(api_key=API_KEY)
        chat_completion = client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            model="llama3-8b-8192",  # Update with your desired model
        )
        return chat_completion.choices[0].message.content
    except Exception as e:  # broad by design: the UI must never crash
        return f"An error occurred: {str(e)}"
26
 
27
# Streamlit App — page chrome must be configured before any other st.* call.
st.set_page_config(
    page_title="AI Chat Interface",
    page_icon="🤖",
    layout="wide",
)
29
 
30
  # Add a header with logo (optional)
31
  st.markdown("""
 
37
 
38
# Progress bar animation for loading
with st.spinner("Thinking..."):
    time.sleep(2)  # Simulate processing time

# User input with placeholder and help text
user_input = st.text_input("Your Prompt", placeholder="Type your question here...", help="Feel free to ask anything!")

# Display "Thinking..." animation while fetching response
if st.button("Get Response"):
    with st.spinner("Fetching Response..."):
        response = get_model_response(user_input)
    st.write(f"**Model Response:** {response}")

    # Success message animation
    st.success("Response retrieved successfully!")

    # Example of a simple chart (if applicable)
    # NOTE(review): moved inside the button branch — `response` is only
    # bound after a click, so referencing it at the top level raised
    # NameError on every rerun where the button had not been pressed.
    if "chart" in response.lower():
        # Generate some random data
        x = [1, 2, 3, 4, 5]
        y = [2, 4, 5, 4, 5]

        # Create a simple line chart on an explicit Figure — passing the
        # pyplot module (global state) to st.pyplot is deprecated.
        fig, ax = plt.subplots()
        ax.plot(x, y)
        ax.set_xlabel("X-axis")
        ax.set_ylabel("Y-axis")
        ax.set_title("Simple Line Chart")
        st.pyplot(fig)

# Additional tips:
# - Use custom CSS to style the app further.
# - Explore Streamlit's components library for more interactive elements.
# - Consider using a library like `plotly` for more advanced visualizations.
# - Implement error handling and logging for a robust app.
# - Test your app thoroughly on different browsers and devices.