notes73 committed on
Commit
175f982
·
1 Parent(s): 7562591

Updated chatbot app

Browse files
Files changed (3) hide show
  1. .gitattributes +35 -0
  2. app.py +145 -18
  3. requirements.txt +9 -2
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -1,33 +1,160 @@
1
  import streamlit as st
2
  import openai
3
  import os
 
 
 
 
 
 
 
4
 
5
- # Set OpenAI API key
6
- openai.api_key = os.getenv("OPENAI_API_KEY")
 
 
7
 
8
- if not openai.api_key:
9
- st.error("⚠️ OpenAI API key is missing! Please add it in Hugging Face secrets.")
10
- st.stop()
 
 
11
 
12
  # App title
13
- st.title("🤖 Interactive Chatbot Assistant")
 
14
 
15
- # User input
16
- user_input = st.text_input("You:", "")
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
- if st.button("Send"):
19
- if user_input:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  try:
21
- response = openai.chat.completions.create(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  model="gpt-4",
23
- messages=[{"role": "user", "content": user_input}]
 
 
 
 
24
  )
25
- st.text_area("Chatbot:", response.choices[0].message.content, height=200)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  except Exception as e:
27
- st.error(f"Error: {e}")
28
- else:
29
- st.warning("Please enter a message to chat.")
30
 
31
- st.sidebar.subheader("Chatbot Settings")
32
- st.sidebar.write("Customize your chatbot experience!")
 
 
 
 
 
 
33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
  import openai
3
  import os
4
+ from langchain.document_loaders import PyPDFLoader
5
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
6
+ from langchain.embeddings import OpenAIEmbeddings
7
+ from langchain.vectorstores import FAISS
8
+ from openai import OpenAI
9
+ import base64
10
+ import requests
11
 
12
+ # Initialize clients
13
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
14
+ elevenlabs_key = os.getenv("ELEVENLABS_API_KEY")
15
+ tavily_key = os.getenv("TAVILY_API_KEY")
16
 
17
+ # Initialize session state
18
+ if "messages" not in st.session_state:
19
+ st.session_state.messages = []
20
+ if "document_vectors" not in st.session_state:
21
+ st.session_state.document_vectors = None
22
 
23
  # App title
24
+ st.title("🤖 SuperBot Pro")
25
+ st.caption("An AI Assistant with Superpowers")
26
 
27
+ # ===== Sidebar Settings =====
28
+ with st.sidebar:
29
+ st.header("⚙️ Settings")
30
+
31
+ # Personality and Mode
32
+ tone = st.selectbox("Personality:", ["Assistant", "Sarcastic", "Academic", "Shakespeare"])
33
+ mode = st.radio("Mode:", ["Chat", "Document Q&A", "Web Researcher"])
34
+
35
+ # Advanced Features
36
+ web_access = st.checkbox("Enable Web Search", value=False)
37
+ voice_enabled = st.checkbox("Enable Voice Response", value=False)
38
+
39
+ # File Uploaders
40
+ uploaded_file = st.file_uploader("Upload Document (PDF)", type=["pdf"])
41
+ uploaded_image = st.file_uploader("Upload Image", type=["jpg", "png"])
42
 
43
+ # ===== Document Processing =====
44
+ if uploaded_file and mode == "Document Q&A":
45
+ with st.spinner("Processing document..."):
46
+ # Save and load PDF
47
+ with open(uploaded_file.name, "wb") as f:
48
+ f.write(uploaded_file.getbuffer())
49
+ loader = PyPDFLoader(uploaded_file.name)
50
+ pages = loader.load()
51
+
52
+ # Split and embed
53
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
54
+ texts = text_splitter.split_documents(pages)
55
+ embeddings = OpenAIEmbeddings()
56
+ st.session_state.document_vectors = FAISS.from_documents(texts, embeddings)
57
+
58
+ # ===== Chat Interface =====
59
+ for message in st.session_state.messages:
60
+ with st.chat_message(message["role"]):
61
+ st.markdown(message["content"])
62
+ if "audio" in message:
63
+ st.audio(message["audio"], format="audio/mp3")
64
+
65
+ # Input area
66
+ if prompt := st.chat_input("Ask me anything..."):
67
+ # Add user message to history
68
+ st.session_state.messages.append({"role": "user", "content": prompt})
69
+
70
+ with st.chat_message("user"):
71
+ st.markdown(prompt)
72
+
73
+ # ===== Generate Response =====
74
+ with st.chat_message("assistant"):
75
+ response_placeholder = st.empty()
76
+ full_response = ""
77
+
78
  try:
79
+ # ===== Content Moderation =====
80
+ moderation = client.moderations.create(input=prompt)
81
+ if moderation.results[0].flagged:
82
+ st.error("Content violates policies")
83
+ st.stop()
84
+
85
+ # ===== System Prompt Engineering =====
86
+ system_prompt = f"You are a {tone} assistant. Respond concisely."
87
+
88
+ if mode == "Document Q&A" and st.session_state.document_vectors:
89
+ docs = st.session_state.document_vectors.similarity_search(prompt, k=3)
90
+ system_prompt += f"\nDocument context: {[doc.page_content for doc in docs]}"
91
+
92
+ if web_access and tavily_key:
93
+ search_response = requests.post(
94
+ "https://api.tavily.com/search",
95
+ json={"query": prompt, "api_key": tavily_key}
96
+ )
97
+ web_results = search_response.json()["results"]
98
+ system_prompt += f"\nWeb results: {web_results[:2]}"
99
+
100
+ # ===== Generate with OpenAI =====
101
+ response = client.chat.completions.create(
102
  model="gpt-4",
103
+ messages=[
104
+ {"role": "system", "content": system_prompt},
105
+ *st.session_state.messages
106
+ ],
107
+ stream=True
108
  )
109
+
110
+ # Stream response
111
+ for chunk in response:
112
+ if chunk.choices[0].delta.content:
113
+ full_response += chunk.choices[0].delta.content
114
+ response_placeholder.markdown(full_response + "▌")
115
+
116
+ response_placeholder.markdown(full_response)
117
+
118
+ # ===== Voice Output =====
119
+ if voice_enabled and elevenlabs_key:
120
+ audio_response = requests.post(
121
+ "https://api.elevenlabs.io/v1/text-to-speech/21m00Tcm4TlvDq8ikWAM",
122
+ headers={"xi-api-key": elevenlabs_key},
123
+ json={"text": full_response}
124
+ )
125
+ st.audio(audio_response.content, format="audio/mp3")
126
+
127
+ # Add to history
128
+ st.session_state.messages.append({
129
+ "role": "assistant",
130
+ "content": full_response,
131
+ "audio": audio_response.content if voice_enabled else None
132
+ })
133
+
134
  except Exception as e:
135
+ st.error(f"Error: {str(e)}")
 
 
136
 
137
+ # ===== Feedback Buttons =====
138
+ col1, col2 = st.columns(2)
139
+ with col1:
140
+ if st.button("👍 Good Response"):
141
+ st.toast("Thanks for your feedback!")
142
+ with col2:
143
+ if st.button("👎 Needs Improvement"):
144
+ st.toast("We'll do better next time!")
145
 
146
+ # ===== Image Processing =====
147
+ if uploaded_image:
148
+ st.image(uploaded_image, caption="Uploaded Image")
149
+ with st.spinner("Analyzing image..."):
150
+ response = client.chat.completions.create(
151
+ model="gpt-4-vision-preview",
152
+ messages=[{
153
+ "role": "user",
154
+ "content": [
155
+ {"type": "text", "text": "Describe this image"},
156
+ {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64.b64encode(uploaded_image.getvalue()).decode('utf-8')}"}}
157
+ ]
158
+ }]
159
+ )
160
+ st.write(response.choices[0].message.content)
requirements.txt CHANGED
@@ -1,2 +1,9 @@
1
- streamlit
2
- openai
 
 
 
 
 
 
 
 
1
+ streamlit>=1.28
2
+ openai>=1.3
3
+ python-dotenv>=1.0
4
+ langchain>=0.0.340
5
+ pypdf>=3.0
6
+ faiss-cpu>=1.7
7
+ elevenlabs>=0.2.12
8
+ tavily-python>=0.2
9
+ requests>=2.31