Haseeb-001 committed on
Commit
2444ddb
Β·
verified Β·
1 Parent(s): cba53df

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -75
app.py CHANGED
@@ -1,11 +1,8 @@
1
  import os
2
- from groq import Groq
3
- import pandas as pd
4
- import speech_recognition as sr
5
- import pyttsx3
6
  import streamlit as st
 
7
 
8
- # API_KEY
9
  API = "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"
10
 
11
  # Set up the Groq client
@@ -13,78 +10,42 @@ client = Groq(api_key=API)
13
 
14
  # Function to process user input with Llama model
15
  def process_prompt(prompt, model="llama-3.3-70b-versatile"):
16
- chat_completion = client.chat.completions.create(
17
- messages=[{"role": "user", "content": prompt}],
18
- model=model,
19
- stream=False
20
- )
21
- return chat_completion.choices[0].message.content
22
-
23
- # Function to process uploaded files
24
- def process_file(file):
25
- if file.type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":
26
- data = pd.read_excel(file)
27
- elif file.type == "text/csv":
28
- data = pd.read_csv(file)
29
- else:
30
- return "Unsupported file format."
31
- return data
32
-
33
- # Function to generate human-friendly responses for file data
34
- def analyze_file_data(data):
35
- summary = f"File contains {data.shape[0]} rows and {data.shape[1]} columns."
36
- response = f"""
37
- **File Analysis:**
38
- - πŸ“„ Total Rows: {data.shape[0]}
39
- - πŸ“Š Total Columns: {data.shape[1]}
40
- - First Few Rows:
41
- {data.head().to_markdown()}
42
- """
43
- return response
44
-
45
- # Function for speech-to-text
46
- def speech_to_text():
47
- recognizer = sr.Recognizer()
48
- with sr.Microphone() as source:
49
- st.write("πŸŽ™οΈ Listening...")
50
- audio = recognizer.listen(source)
51
  try:
52
- return recognizer.recognize_google(audio)
53
- except sr.UnknownValueError:
54
- return "Sorry, I didn't catch that."
 
 
 
 
 
 
 
 
 
 
 
55
 
56
- # Function for text-to-speech
57
- def text_to_speech(text):
58
- engine = pyttsx3.init()
59
- engine.say(text)
60
- engine.runAndWait()
61
 
62
  # Streamlit UI
63
  def chatbot_ui():
64
  st.title("πŸ“’ Real-Time AI Chatbot")
65
 
66
- # File upload feature
67
- st.sidebar.header("πŸ“‚ File Management")
68
- uploaded_file = st.sidebar.file_uploader("Upload a file (CSV/Excel)", type=["csv", "xlsx"])
69
- if uploaded_file:
70
- data = process_file(uploaded_file)
71
- st.sidebar.write(analyze_file_data(data))
72
- if st.sidebar.button("Delete File"):
73
- uploaded_file = None
74
- st.sidebar.write("File deleted.")
75
-
76
  # Chat section
77
  st.header("πŸ’¬ Chat Section")
78
- chat_history = st.session_state.get("chat_history", [])
79
-
80
  with st.form("chat_form", clear_on_submit=True):
81
  user_input = st.text_input("Type your message or prompt here...")
82
  submitted = st.form_submit_button("Send")
83
  if submitted and user_input:
84
  response = process_prompt(user_input)
85
- chat_history.append(("User", user_input))
86
- chat_history.append(("Bot", response))
87
- st.session_state["chat_history"] = chat_history
88
 
89
  # Display chat history
90
  for sender, message in chat_history:
@@ -98,16 +59,5 @@ def chatbot_ui():
98
  chat_file = "\n".join([f"{sender}: {message}" for sender, message in chat_history])
99
  st.download_button("Download", chat_file, "chat_history.txt", "text/plain")
100
 
101
- # Speech-to-Text Section
102
- st.header("πŸŽ™οΈ Speech-to-Text")
103
- if st.button("Start Speech Recognition"):
104
- st.write(f"**πŸ‘‚ You said:** {speech_to_text()}")
105
-
106
- # Text-to-Speech Section
107
- st.header("πŸ”Š Text-to-Speech")
108
- tts_text = st.text_input("Enter text to convert to speech:")
109
- if st.button("Speak Text"):
110
- text_to_speech(tts_text)
111
-
112
  if __name__ == "__main__":
113
- chatbot_ui()
 
1
  import os
 
 
 
 
2
  import streamlit as st
3
+ from groq import Groq
4
 
5
# API key: read from the environment so the secret never lives in source control.
# SECURITY: the previously hardcoded key was committed publicly — it must be
# revoked/rotated with Groq regardless of this change.
API = os.environ.get("GROQ_API_KEY", "")
7
 
8
  # Set up the Groq client
 
10
 
11
# Send one user prompt to the Groq chat API and return the model's reply text.
def process_prompt(prompt, model="llama-3.3-70b-versatile"):
    """Return the assistant's reply for *prompt*.

    Uses the module-level Groq ``client``. On any API failure the error is
    logged and a generic fallback message is returned instead of raising,
    so the UI never crashes on a bad request.
    """
    request_messages = [{"role": "user", "content": prompt}]
    try:
        completion = client.chat.completions.create(
            messages=request_messages,
            model=model,
            stream=False,
        )
        # Non-streaming call: the full reply is in the first choice.
        return completion.choices[0].message.content
    except Exception as e:
        print(f"Error processing prompt: {e}")  # Log the error for debugging
        return "An error occurred. Please try again later."
23
+
24
# Chat-history accessor backed by Streamlit session state.
def get_chat_history():
    """Return the stored chat history as a list of (sender, message) pairs.

    Falls back to an empty list when no history has been recorded yet.
    """
    return st.session_state.get("chat_history", [])
28
 
29
def update_chat_history(user_input, response):
    """Record one user/bot exchange in the session-state chat history.

    Appends ("User", user_input) followed by ("Bot", response) and writes
    the updated list back to ``st.session_state["chat_history"]``.
    """
    history = get_chat_history()
    history.extend([("User", user_input), ("Bot", response)])
    st.session_state["chat_history"] = history
34
 
35
  # Streamlit UI
36
  def chatbot_ui():
37
  st.title("πŸ“’ Real-Time AI Chatbot")
38
 
 
 
 
 
 
 
 
 
 
 
39
  # Chat section
40
  st.header("πŸ’¬ Chat Section")
41
+ chat_history = get_chat_history()
42
+
43
  with st.form("chat_form", clear_on_submit=True):
44
  user_input = st.text_input("Type your message or prompt here...")
45
  submitted = st.form_submit_button("Send")
46
  if submitted and user_input:
47
  response = process_prompt(user_input)
48
+ update_chat_history(user_input, response)
 
 
49
 
50
  # Display chat history
51
  for sender, message in chat_history:
 
59
  chat_file = "\n".join([f"{sender}: {message}" for sender, message in chat_history])
60
  st.download_button("Download", chat_file, "chat_history.txt", "text/plain")
61
 
 
 
 
 
 
 
 
 
 
 
 
62
  if __name__ == "__main__":
63
+ chatbot_ui()