addaweathers committed on
Commit
0108a14
Β·
verified Β·
1 Parent(s): 0fd7a55

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -37
app.py CHANGED
@@ -1,29 +1,28 @@
1
- # Install required libraries.
2
- # Files, API key, etc
3
  import os
4
  from pathlib import Path
5
- from dotenv import load_dotenv
6
  import gradio as gr
7
 
8
- # LangChain imports
 
 
 
9
  from langchain_huggingface import HuggingFaceEmbeddings
10
  from langchain_community.document_loaders import UnstructuredMarkdownLoader
11
  from langchain_text_splitters import MarkdownHeaderTextSplitter
12
  from langchain_community.vectorstores import Chroma
13
- from langchain.chains.combine_documents import create_stuff_documents_chain
14
- from langchain.chains import create_retrieval_chain
15
  from langchain_core.prompts import PromptTemplate
16
- from huggingface_hub import InferenceClient
 
17
  from langchain_core.language_models.llms import LLM
18
- from typing import Optional, List, Any
19
 
20
- # Fix OpenMP environment variable issue
21
- os.environ["OMP_NUM_THREADS"] = "1"
22
 
23
- # Load environment variables first
24
- # load_dotenv() # This will need commented out on HuggingFace. To run the code yourself on your device, keep this and comment out the api_key var
25
- # via os.getenv
26
- api_key = os.getenv("HUGGINGFACEHUB_API_TOKEN")
27
 
28
  print("Loading portfolio data...")
29
 
@@ -66,13 +65,6 @@ prompt = PromptTemplate(
66
  partial_variables={"system_prompt": system_prompt_content}
67
  )
68
 
69
- # Get HF token
70
- hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
71
-
72
- if not hf_token:
73
- print("ERROR: Token not found in environment variables!")
74
- exit(1)
75
-
76
  print("Initializing model...")
77
 
78
  # Initialize InferenceClient
@@ -103,9 +95,23 @@ class HFInferenceClientLLM(LLM):
103
  # Create LLM instance
104
  llm = HFInferenceClientLLM(client=client, model="meta-llama/Llama-3.2-3B-Instruct")
105
 
106
- # Create the document processing chain
107
- combine_docs_chain = create_stuff_documents_chain(llm, prompt)
108
- qa_chain = create_retrieval_chain(vectorstore.as_retriever(), combine_docs_chain)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
 
110
  print("RAG system ready!")
111
 
@@ -114,27 +120,25 @@ print("RAG system ready!")
114
  def chat_with_portfolio(message, history):
115
  """Process user message and return AI response"""
116
  try:
117
- response = qa_chain.invoke({"input": message})
118
- return response["answer"]
119
  except Exception as e:
120
  return f"Error: {str(e)}"
121
 
 
122
  # Aesthetics - Fixed CSS for proper icon sizing
123
  custom_css = """
124
  @import url('https://fonts.googleapis.com/css2?family=Quicksand:wght@400;600&display=swap');
125
 
126
- /* Global Font and Body */
127
  * {
128
  font-family: 'Quicksand', sans-serif !important;
129
  }
130
 
131
- /* The Main Background */
132
  .gradio-container {
133
  background: linear-gradient(135deg, #a78bfa 0%, #6366f1 50%, #3b82f6 100%) !important;
134
  background-attachment: fixed !important;
135
  }
136
 
137
- /* Glassmorphism for the Chat bubbles */
138
  .message-wrap {
139
  background: rgba(255, 255, 255, 0.15) !important;
140
  backdrop-filter: blur(12px) !important;
@@ -147,7 +151,6 @@ custom_css = """
147
  color: white !important;
148
  }
149
 
150
- /* Input Box Styling */
151
  .input-wrap textarea,
152
  textarea {
153
  background: rgba(255, 255, 255, 0.1) !important;
@@ -161,7 +164,6 @@ textarea::placeholder {
161
  color: rgba(255, 255, 255, 0.6) !important;
162
  }
163
 
164
- /* FIX FOR HUGE ICONS - Constrain all buttons and icons */
165
  button {
166
  max-width: 100px !important;
167
  max-height: 44px !important;
@@ -176,14 +178,12 @@ button svg {
176
  max-height: 20px !important;
177
  }
178
 
179
- /* Specific fix for send button */
180
  .submit-btn,
181
  button[aria-label="Submit"] {
182
  max-width: 60px !important;
183
  width: 60px !important;
184
  }
185
 
186
- /* Example buttons styling */
187
  .examples button {
188
  background: rgba(255, 255, 255, 0.1) !important;
189
  border: 1px solid rgba(255, 255, 255, 0.2) !important;
@@ -196,12 +196,10 @@ button[aria-label="Submit"] {
196
  background: rgba(255, 255, 255, 0.2) !important;
197
  }
198
 
199
- /* Hide Gradio footer */
200
  footer {
201
  display: none !important;
202
  }
203
 
204
- /* Chatbot container styling */
205
  .chatbot {
206
  background: rgba(255, 255, 255, 0.05) !important;
207
  border-radius: 20px !important;
@@ -209,7 +207,6 @@ footer {
209
  """
210
 
211
  with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
212
- # Title with specific white color for contrast
213
  gr.Markdown(
214
  "<h1 style='text-align: center; color: white; text-shadow: 2px 2px 4px rgba(0,0,0,0.3);'>πŸˆβ€β¬› Chat with Adda-Bot</h1>"
215
  )
@@ -228,4 +225,4 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
228
  )
229
 
230
  if __name__ == "__main__":
231
- demo.launch()
 
 
 
1
  import os
2
  from pathlib import Path
3
+ from typing import Optional, List, Any
4
  import gradio as gr
5
 
6
+ # Fix OpenMP environment variable issue BEFORE other imports
7
+ os.environ["OMP_NUM_THREADS"] = "1"
8
+
9
+ # langchain.chains is no longer imported; the RAG pipeline below is built from langchain_core runnables instead.
10
  from langchain_huggingface import HuggingFaceEmbeddings
11
  from langchain_community.document_loaders import UnstructuredMarkdownLoader
12
  from langchain_text_splitters import MarkdownHeaderTextSplitter
13
  from langchain_community.vectorstores import Chroma
 
 
14
  from langchain_core.prompts import PromptTemplate
15
+ from langchain_core.runnables import RunnablePassthrough
16
+ from langchain_core.output_parsers import StrOutputParser
17
  from langchain_core.language_models.llms import LLM
18
+ from huggingface_hub import InferenceClient
19
 
20
# Get HF token used by the InferenceClient for remote model calls.
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")

if not hf_token:
    # Fail fast: without a token every inference call would 401 later.
    print("ERROR: Token not found in environment variables!")
    # `exit()` is injected by the `site` module and may not exist under
    # `python -S`; raising SystemExit is the reliable equivalent.
    raise SystemExit(1)
 
26
 
27
  print("Loading portfolio data...")
28
 
 
65
  partial_variables={"system_prompt": system_prompt_content}
66
  )
67
 
 
 
 
 
 
 
 
68
  print("Initializing model...")
69
 
70
  # Initialize InferenceClient
 
95
  # Create LLM instance
96
  llm = HFInferenceClientLLM(client=client, model="meta-llama/Llama-3.2-3B-Instruct")
97
 
98
# Create retriever: wrap the Chroma vector store in LangChain's retriever
# interface (default similarity-search settings — top-k not pinned here;
# NOTE(review): confirm the default k suits this corpus).
retriever = vectorstore.as_retriever()
101
# Helper used by the RAG chain: collapse retrieved documents into one
# context string (no langchain.chains needed).
def format_docs(docs):
    """Join each document's page_content with blank-line separators."""
    return "\n\n".join(doc.page_content for doc in docs)
104
+
105
+ # Create RAG chain using RunnablePassthrough
106
+ rag_chain = (
107
+ {
108
+ "context": retriever | format_docs,
109
+ "input": RunnablePassthrough()
110
+ }
111
+ | prompt
112
+ | llm
113
+ | StrOutputParser()
114
+ )
115
 
116
  print("RAG system ready!")
117
 
 
120
def chat_with_portfolio(message, history):
    """Answer one user turn via the RAG chain.

    `history` is accepted for Gradio's chat callback signature but is not
    used — each question is answered independently. Any failure is
    reported back to the chat as an error string rather than raised.
    """
    try:
        return rag_chain.invoke(message)
    except Exception as exc:
        return f"Error: {str(exc)}"
127
 
128
+
129
  # Aesthetics - Fixed CSS for proper icon sizing
130
  custom_css = """
131
  @import url('https://fonts.googleapis.com/css2?family=Quicksand:wght@400;600&display=swap');
132
 
 
133
  * {
134
  font-family: 'Quicksand', sans-serif !important;
135
  }
136
 
 
137
  .gradio-container {
138
  background: linear-gradient(135deg, #a78bfa 0%, #6366f1 50%, #3b82f6 100%) !important;
139
  background-attachment: fixed !important;
140
  }
141
 
 
142
  .message-wrap {
143
  background: rgba(255, 255, 255, 0.15) !important;
144
  backdrop-filter: blur(12px) !important;
 
151
  color: white !important;
152
  }
153
 
 
154
  .input-wrap textarea,
155
  textarea {
156
  background: rgba(255, 255, 255, 0.1) !important;
 
164
  color: rgba(255, 255, 255, 0.6) !important;
165
  }
166
 
 
167
  button {
168
  max-width: 100px !important;
169
  max-height: 44px !important;
 
178
  max-height: 20px !important;
179
  }
180
 
 
181
  .submit-btn,
182
  button[aria-label="Submit"] {
183
  max-width: 60px !important;
184
  width: 60px !important;
185
  }
186
 
 
187
  .examples button {
188
  background: rgba(255, 255, 255, 0.1) !important;
189
  border: 1px solid rgba(255, 255, 255, 0.2) !important;
 
196
  background: rgba(255, 255, 255, 0.2) !important;
197
  }
198
 
 
199
  footer {
200
  display: none !important;
201
  }
202
 
 
203
  .chatbot {
204
  background: rgba(255, 255, 255, 0.05) !important;
205
  border-radius: 20px !important;
 
207
  """
208
 
209
  with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
 
210
  gr.Markdown(
211
  "<h1 style='text-align: center; color: white; text-shadow: 2px 2px 4px rgba(0,0,0,0.3);'>πŸˆβ€β¬› Chat with Adda-Bot</h1>"
212
  )
 
225
  )
226
 
227
if __name__ == "__main__":
    # Start the Gradio server (blocking) only when run as a script.
    demo.launch()