Abid Ali Awan committed on
Commit
10e2503
·
1 Parent(s): 66975c5

Update .gitignore to include __pycache__ and enhance app.py with improved regulatory query processing, tool detection, and user interaction features. Refactor streaming chat functionality and add memory search capabilities. Update requirements.txt to include openai-agents and gradio dependencies.

Browse files
Files changed (3) hide show
  1. .gitignore +2 -1
  2. app.py +305 -197
  3. requirements.txt +3 -1
.gitignore CHANGED
@@ -1 +1,2 @@
1
- .venv
 
 
1
+ .venv
2
+ *__pycache__
app.py CHANGED
@@ -1,23 +1,22 @@
1
  import hashlib
2
  import json
3
  import os
4
- from datetime import datetime
5
- from typing import Dict, List, Optional, Tuple
6
 
7
  import gradio as gr
 
8
  from mem0 import MemoryClient
9
  from openai import OpenAI
10
  from tavily import TavilyClient
11
 
12
  # Initialize services
13
  tavily_client = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
14
- mem0_client = MemoryClient(api_key=os.getenv("MEM0_API_KEY"))
15
-
16
- # Initialize OpenAI client with Keywords AI endpoint
17
  client = OpenAI(
18
  base_url="https://api.keywordsai.co/api/",
19
  api_key=os.getenv("KEYWORDS_API_KEY"),
20
  )
 
21
 
22
  # Regulatory websites mapping
23
  REGULATORY_SOURCES = {
@@ -38,16 +37,21 @@ REGULATORY_SOURCES = {
38
  },
39
  }
40
 
 
 
 
 
 
 
41
 
42
  class RegRadarChat:
43
  def __init__(self):
44
- self.conversation_state = {}
45
  self.cached_searches = {}
46
 
47
  def generate_cache_key(self, industry: str, region: str, keywords: str) -> str:
48
- """Generate a unique cache key for search parameters"""
49
- content = f"{industry.lower()}_{region.lower()}_{keywords.lower()}"
50
- return hashlib.md5(content.encode()).hexdigest()
51
 
52
  def call_llm(self, prompt: str, temperature: float = 0.3) -> str:
53
  """Make a call to the LLM"""
@@ -62,51 +66,29 @@ class RegRadarChat:
62
  print(f"LLM call error: {e}")
63
  return "I apologize, but I encountered an error processing your request."
64
 
65
- def check_cache(self, cache_key: str) -> Optional[Dict]:
66
- """Check if we have cached results for this search using latest Mem0 search best practices"""
67
- try:
68
- # Use Mem0 search with metadata filter for cache_key
69
- filters = {
70
- "AND": [
71
- {"metadata": {"cache_key": cache_key}},
72
- {"metadata": {"type": "cache"}},
73
- ]
74
- }
75
- memories = mem0_client.get_all(
76
- version="v2", filters=filters, page=1, page_size=1
77
- )
78
- if memories and len(memories) > 0:
79
- memory_content = memories[0].get("content", "")
80
- if "cached_data:" in memory_content:
81
- cached_json = memory_content.split("cached_data:", 1)[1]
82
- return json.loads(cached_json)
83
- except Exception as e:
84
- print(f"Cache check error: {e}")
85
- return None
86
-
87
- def save_to_cache(self, cache_key: str, data: Dict):
88
- """Save crawled data to cache using latest Mem0 add best practices"""
89
  try:
90
- cache_data = {
91
- "cache_key": cache_key,
92
- "timestamp": datetime.now().isoformat(),
93
- "data": data,
94
- }
95
- mem0_client.add(
96
- messages=[
97
- {
98
- "role": "system",
99
- "content": f"cache_key:{cache_key} cached_data:{json.dumps(cache_data)}",
100
- }
101
- ],
102
- user_id="cache_system",
103
- metadata={"type": "cache", "cache_key": cache_key},
104
  )
 
 
 
 
105
  except Exception as e:
106
- print(f"Cache save error: {e}")
107
 
108
  def crawl_regulatory_sites(self, industry: str, region: str, keywords: str) -> Dict:
109
  """Crawl regulatory websites for updates"""
 
 
 
 
 
110
  urls_to_crawl = REGULATORY_SOURCES.get(region, REGULATORY_SOURCES["US"])
111
  all_results = []
112
 
@@ -118,35 +100,31 @@ class RegRadarChat:
118
  - Focus on recent content (last 30 days)
119
  """
120
 
121
- for source_name, url in list(urls_to_crawl.items())[
122
- :3
123
- ]: # Limit to 3 sources for speed
124
  try:
125
  crawl_response = tavily_client.crawl(
126
  url=url, max_depth=2, limit=5, instructions=crawl_instructions
127
  )
128
-
129
  for result in crawl_response.get("results", []):
130
  all_results.append(
131
  {
132
  "source": source_name,
133
- "url": result.get("url", ""),
134
  "title": result.get("title", ""),
135
  "content": result.get("raw_content", "")[:1500],
136
  }
137
  )
138
-
139
  except Exception as e:
140
  print(f"Crawl error for {source_name}: {e}")
141
 
142
- # Also do a general search
143
  try:
144
  search_results = tavily_client.search(
145
- query=f"{industry} {region} regulatory updates compliance {keywords} 2024",
146
  max_results=5,
147
  include_raw_content=True,
148
  )
149
-
150
  for result in search_results.get("results", []):
151
  all_results.append(
152
  {
@@ -159,178 +137,290 @@ class RegRadarChat:
159
  except Exception as e:
160
  print(f"Search error: {e}")
161
 
162
- return {"results": all_results}
163
-
164
- def summarize_results(self, results: List[Dict]) -> str:
165
- """Summarize crawled results into a readable format"""
166
- if not results:
167
- return "No regulatory updates found for your criteria."
168
-
169
- # Group by source
170
- by_source = {}
171
- for result in results[:8]: # Limit to top 8 results
172
- source = result.get("source", "Unknown")
173
- if source not in by_source:
174
- by_source[source] = []
175
- by_source[source].append(result)
176
-
177
- # Create summary prompt
178
- prompt = f"""
179
- Analyze these regulatory updates and provide:
180
- 1. A brief overview of the key findings
181
- 2. The most important compliance changes
182
- 3. Action items for compliance teams
183
-
184
- Updates:
185
- {json.dumps(by_source, indent=2)}
186
-
187
- Format your response in a conversational way, using bullet points for clarity.
188
- """
189
 
190
- return self.call_llm(prompt)
191
-
192
- def process_message(
193
- self, message: str, history: List[Dict]
194
- ) -> Tuple[List[Dict], str]:
195
- """Process user message and generate response (open Q&A style)"""
196
- if not message.strip():
197
- response = "👋 Hello! I'm RegRadar, your AI regulatory compliance assistant.\n\nAsk me any question about regulations, compliance, or recent updates in any industry or region."
198
- else:
199
- prompt = f"""
200
- You are an expert regulatory compliance assistant. Answer the following question as helpfully and specifically as possible. If the question is about a particular industry, region, or topic, use your knowledge to provide the most relevant and up-to-date information. If you don't have enough information, say so.\n\nQuestion: {message}
201
- """
202
- response = self.call_llm(prompt)
203
-
204
- history.append({"role": "user", "content": message})
205
- history.append({"role": "assistant", "content": response})
206
- return history, ""
207
-
208
- def stream_llm(self, prompt: str, temperature: float = 0.3):
209
- """Stream LLM response using OpenAI's streaming API."""
210
  try:
211
- stream = client.chat.completions.create(
212
- model="gpt-4.1-mini",
213
- messages=[{"role": "user", "content": prompt}],
214
- temperature=temperature,
215
- stream=True,
 
 
 
216
  )
217
- partial = ""
218
- for chunk in stream:
219
- delta = getattr(chunk.choices[0].delta, "content", None)
220
- if delta:
221
- partial += delta
222
- yield partial
223
  except Exception as e:
224
- yield f"I apologize, but I encountered an error processing your request: {e}"
 
 
 
 
 
 
 
 
225
 
226
 
227
- # Initialize the chat instance
228
  chat_instance = RegRadarChat()
229
 
230
 
231
- # Streaming generator for regulatory Q&A
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
232
  def streaming_chatbot(message, history):
233
- # 0. Intent detection: decide if this is a regulatory/compliance question or just a general/greeting/chat
 
 
 
 
 
 
 
 
 
 
234
  intent_prompt = f"""
235
  Is the following user message a regulatory, compliance, or update-related question (yes/no)?
236
  Message: {message}
237
  Respond with only 'yes' or 'no'.
238
  """
 
239
  intent = chat_instance.call_llm(intent_prompt).strip().lower()
 
240
  if intent.startswith("n"):
241
- # General chat, not regulatory: use LLM for a conversational response
242
- chat_prompt = f"You are a friendly AI assistant. Respond conversationally to the following user message.\nMessage: {message}"
243
- history = history + [{"role": "user", "content": message}]
244
- history = history + [{"role": "assistant", "content": ""}]
 
 
 
 
 
 
 
 
 
 
 
245
  for chunk in chat_instance.stream_llm(chat_prompt):
246
- history[-1]["content"] = chunk
 
247
  yield history, ""
 
248
  return
249
 
250
- # 1. Extract industry, region, and keywords from the user's message using the LLM
 
 
 
 
 
 
 
 
 
 
251
  extract_prompt = f"""
252
- Extract the industry, region, and main keywords from the following user query for regulatory monitoring.
253
- Respond in JSON with keys: industry, region, keywords. If not specified, use 'General' for industry, 'US' for region, and use the main topic as keywords.
254
- Query: {message}
 
 
255
  """
 
256
  extraction = chat_instance.call_llm(extract_prompt)
257
  try:
258
- parsed = json.loads(extraction)
259
- industry = parsed.get("industry", "General")
260
- region = parsed.get("region", "US")
261
- keywords = parsed.get("keywords", message)
262
- except Exception:
263
- industry = "General"
264
- region = "US"
265
- keywords = message
266
-
267
- # 2. Crawl regulatory sites (or check cache)
268
- cache_key = chat_instance.generate_cache_key(industry, region, keywords)
269
- cached = chat_instance.check_cache(cache_key)
270
- if cached:
271
- results = cached["data"]["results"]
272
- else:
273
- data = chat_instance.crawl_regulatory_sites(industry, region, keywords)
274
- chat_instance.save_to_cache(cache_key, data)
275
- results = data["results"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
276
 
277
- # 3. Summarize results (streaming)
278
- if not results:
279
- summary_prompt = f"No regulatory updates found for {industry} in {region} with keywords: {keywords}."
280
  else:
281
- # Group by source for summary
282
  by_source = {}
283
- for result in results[:8]:
284
  source = result.get("source", "Unknown")
285
  if source not in by_source:
286
  by_source[source] = []
287
  by_source[source].append(result)
 
288
  summary_prompt = f"""
289
- Analyze these regulatory updates and provide:
290
- 1. A brief overview of the key findings
291
- 2. The most important compliance changes
292
- 3. Action items for compliance teams
293
 
294
- Updates:
295
  {json.dumps(by_source, indent=2)}
296
 
297
- Format your response in a conversational way, using bullet points for clarity.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
298
  """
299
 
300
- # Start with an empty assistant message
301
- history = history + [{"role": "assistant", "content": ""}]
 
 
 
 
302
  for chunk in chat_instance.stream_llm(summary_prompt):
303
- history[-1]["content"] = chunk
 
304
  yield history, ""
305
 
 
 
 
 
 
 
 
 
 
 
 
 
306
 
307
  # Create Gradio interface
308
- with gr.Blocks(title="RegRadar Chat", theme=gr.themes.Soft()) as demo:
 
 
 
 
 
 
 
 
 
 
 
 
309
  gr.HTML("""
310
  <center>
311
- <h1 style="text-align: center;">🛰️RegRadar</h1>
312
- <p><b>Ask any question about regulations, compliance, or recent updates in any industry or region.</b></p>
313
  </center>
314
  """)
315
 
 
316
  chatbot = gr.Chatbot(
317
- height=400,
318
  type="messages",
319
- avatar_images=(None, "https://media.roboflow.com/spaces/gemini-icon.png"),
320
  show_copy_button=True,
 
321
  )
322
 
323
- example_queries = [
324
- "Show me the latest SEC regulations for fintech.",
325
- "What are the new data privacy rules in the EU?",
326
- "Any updates on ESG compliance for energy companies?",
327
- "Scan for healthcare regulations in the US.",
328
- "What are the global trends in AI regulation?",
329
- ]
330
-
331
  with gr.Row(equal_height=True):
332
  msg = gr.Textbox(
333
- placeholder="Ask about regulatory updates, compliance, or any related topic...",
334
  show_label=False,
335
  scale=18,
336
  autofocus=True,
@@ -338,37 +428,55 @@ with gr.Blocks(title="RegRadar Chat", theme=gr.themes.Soft()) as demo:
338
  submit = gr.Button("Send", variant="primary", scale=1, min_width=60)
339
  clear = gr.Button("Clear", scale=1, min_width=60)
340
 
341
- gr.Examples(examples=example_queries, inputs=msg, label="Example Queries")
 
 
 
 
 
 
 
342
 
343
- # Event handlers
344
- def user_submit(message, history):
345
- if not message.strip():
346
- # Do not add empty messages, just return the current history and clear the input
347
- return history, "", gr.update(interactive=True), gr.update(interactive=True)
348
- # Always use the open Q&A handler
349
- new_history, _ = chat_instance.process_message(message, history)
350
- return new_history, "", gr.update(interactive=True), gr.update(interactive=True)
351
 
352
- def enable_input():
353
- return gr.update(interactive=True), gr.update(interactive=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
354
 
 
355
  submit_event = msg.submit(streaming_chatbot, [msg, chatbot], [chatbot, msg])
356
  click_event = submit.click(streaming_chatbot, [msg, chatbot], [chatbot, msg])
357
-
358
  clear.click(lambda: ([], ""), outputs=[chatbot, msg])
359
 
360
- gr.Markdown("""
361
- <details>
362
- <summary><strong>Features</strong></summary>
363
- <ul>
364
- <li>🔍 Intelligent web crawling of regulatory sites</li>
365
- <li>💾 Cached results to avoid duplicate crawling</li>
366
- <li>🤖 AI-powered analysis and summaries</li>
367
- <li>💬 Natural conversation interface</li>
368
- </ul>
369
- </details>
370
  """)
371
 
372
- # Set up event loop properly for Gradio
373
  if __name__ == "__main__":
374
  demo.launch()
 
1
  import hashlib
2
  import json
3
  import os
4
+ import time
5
+ from typing import Dict, List, Tuple
6
 
7
  import gradio as gr
8
+ from gradio import ChatMessage
9
  from mem0 import MemoryClient
10
  from openai import OpenAI
11
  from tavily import TavilyClient
12
 
13
  # Initialize services
14
  tavily_client = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
 
 
 
15
  client = OpenAI(
16
  base_url="https://api.keywordsai.co/api/",
17
  api_key=os.getenv("KEYWORDS_API_KEY"),
18
  )
19
+ mem0_client = MemoryClient(api_key=os.getenv("MEM0_API_KEY"))
20
 
21
  # Regulatory websites mapping
22
  REGULATORY_SOURCES = {
 
37
  },
38
  }
39
 
40
+ # Avatar configuration
41
+ AVATAR_IMAGES = (
42
+ None,
43
+ "https://media.roboflow.com/spaces/gemini-icon.png",
44
+ )
45
+
46
 
47
  class RegRadarChat:
48
  def __init__(self):
 
49
  self.cached_searches = {}
50
 
51
  def generate_cache_key(self, industry: str, region: str, keywords: str) -> str:
52
+ """Generate a unique cache key"""
53
+ key = f"{industry}:{region}:{keywords}".lower()
54
+ return hashlib.md5(key.encode()).hexdigest()
55
 
56
  def call_llm(self, prompt: str, temperature: float = 0.3) -> str:
57
  """Make a call to the LLM"""
 
66
  print(f"LLM call error: {e}")
67
  return "I apologize, but I encountered an error processing your request."
68
 
69
+ def stream_llm(self, prompt: str, temperature: float = 0.3):
70
+ """Stream LLM response"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  try:
72
+ stream = client.chat.completions.create(
73
+ model="gpt-4.1-mini",
74
+ messages=[{"role": "user", "content": prompt}],
75
+ temperature=temperature,
76
+ stream=True,
 
 
 
 
 
 
 
 
 
77
  )
78
+ for chunk in stream:
79
+ delta = getattr(chunk.choices[0].delta, "content", None)
80
+ if delta:
81
+ yield delta
82
  except Exception as e:
83
+ yield f"Error: {str(e)}"
84
 
85
  def crawl_regulatory_sites(self, industry: str, region: str, keywords: str) -> Dict:
86
  """Crawl regulatory websites for updates"""
87
+ # Check cache first
88
+ cache_key = self.generate_cache_key(industry, region, keywords)
89
+ if cache_key in self.cached_searches:
90
+ return self.cached_searches[cache_key]
91
+
92
  urls_to_crawl = REGULATORY_SOURCES.get(region, REGULATORY_SOURCES["US"])
93
  all_results = []
94
 
 
100
  - Focus on recent content (last 30 days)
101
  """
102
 
103
+ # Crawl regulatory sites
104
+ for source_name, url in list(urls_to_crawl.items())[:3]:
 
105
  try:
106
  crawl_response = tavily_client.crawl(
107
  url=url, max_depth=2, limit=5, instructions=crawl_instructions
108
  )
 
109
  for result in crawl_response.get("results", []):
110
  all_results.append(
111
  {
112
  "source": source_name,
113
+ "url": url,
114
  "title": result.get("title", ""),
115
  "content": result.get("raw_content", "")[:1500],
116
  }
117
  )
 
118
  except Exception as e:
119
  print(f"Crawl error for {source_name}: {e}")
120
 
121
+ # General search
122
  try:
123
  search_results = tavily_client.search(
124
+ query=f"{industry} {region} regulatory updates compliance {keywords} 2024 2025",
125
  max_results=5,
126
  include_raw_content=True,
127
  )
 
128
  for result in search_results.get("results", []):
129
  all_results.append(
130
  {
 
137
  except Exception as e:
138
  print(f"Search error: {e}")
139
 
140
+ results = {"results": all_results, "total_found": len(all_results)}
141
+ self.cached_searches[cache_key] = results
142
+ return results
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
143
 
144
+ def save_to_memory(self, user_id: str, query: str, response: str):
145
+ """Save interaction to memory"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
146
  try:
147
+ messages = [
148
+ {"role": "user", "content": query},
149
+ {"role": "assistant", "content": response},
150
+ ]
151
+ mem0_client.add(
152
+ messages=messages,
153
+ user_id=user_id,
154
+ metadata={"type": "regulatory_query"},
155
  )
 
 
 
 
 
 
156
  except Exception as e:
157
+ print(f"Memory save error: {e}")
158
+
159
+ def search_memory(self, user_id: str, query: str) -> List[Dict]:
160
+ """Search for similar past queries"""
161
+ try:
162
+ memories = mem0_client.search(query=query, user_id=user_id, limit=3)
163
+ return memories
164
+ except:
165
+ return []
166
 
167
 
168
+ # Initialize chat instance
169
  chat_instance = RegRadarChat()
170
 
171
 
172
def determine_intended_tool(message: str) -> Tuple[str, str]:
    """Pick the tool (key, display name) for *message* via keyword heuristics.

    Crawler keywords take precedence over memory keywords; anything else
    falls back to the general regulatory search.
    """
    text = message.lower()
    rules = (
        (
            ("crawl", "scan", "check", "latest", "update", "recent"),
            ("web_crawler", "Regulatory Web Crawler"),
        ),
        (
            ("remember", "history", "past", "previous"),
            ("memory", "Memory Search"),
        ),
    )
    for triggers, tool in rules:
        if any(t in text for t in triggers):
            return tool
    return "search", "Regulatory Search"
187
+
188
+
189
def streaming_chatbot(message, history):
    """Gradio streaming handler: answer a user message with tool visibility.

    Yields (history, "") tuples so the UI updates progressively: status
    messages, tool execution steps, collapsible raw data, and a streamed
    final compliance report. Non-regulatory messages get a plain
    conversational reply instead.
    """
    if not message.strip():
        # Nothing to do for empty input (the generator yields nothing).
        return history, ""

    # Add user message
    history.append(ChatMessage(role="user", content=message))

    # Start timer so we can report total analysis time at the end.
    start_time = time.time()

    # Detect if this is a regulatory query
    intent_prompt = f"""
    Is the following user message a regulatory, compliance, or update-related question (yes/no)?
    Message: {message}
    Respond with only 'yes' or 'no'.
    """
    intent = chat_instance.call_llm(intent_prompt).strip().lower()

    if intent.startswith("n"):
        # General chat: show a transient status, then stream the reply.
        history.append(
            ChatMessage(role="assistant", content="💬 Processing general query...")
        )
        yield history, ""
        history.pop()

        chat_prompt = (
            f"You are a friendly AI assistant. Respond conversationally to: {message}"
        )
        streaming_content = ""
        history.append(ChatMessage(role="assistant", content=""))
        for chunk in chat_instance.stream_llm(chat_prompt):
            streaming_content += chunk
            history[-1] = ChatMessage(role="assistant", content=streaming_content)
            yield history, ""
        return

    # Show which tool was selected for this query.
    _, tool_name = determine_intended_tool(message)
    status_msg = (
        f"🔍 Using **{tool_name}** to analyze your query (estimated 10-20 seconds)..."
    )
    history.append(ChatMessage(role="assistant", content=status_msg))
    yield history, ""

    # Extract search parameters (industry/region/keywords) via the LLM.
    extract_prompt = f"""
    Extract industry, region, and keywords from this query:
    "{message}"

    Return as JSON with keys: industry, region, keywords
    If not specified, use General/US/main topic
    """
    extraction = chat_instance.call_llm(extract_prompt)
    try:
        parsed = json.loads(extraction)
        if not isinstance(parsed, dict):
            parsed = {}
    except (json.JSONDecodeError, TypeError):
        # BUG FIX: was a bare `except:`; catch only parse failures.
        parsed = {}
    # BUG FIX: fill in per-key defaults so valid JSON missing a key can
    # never raise KeyError below (previously params["industry"] could).
    params = {
        "industry": parsed.get("industry", "General"),
        "region": parsed.get("region", "US"),
        "keywords": parsed.get("keywords", message),
    }

    # Replace the status message with the parameter/tool execution report.
    history.pop()
    tool_status = f"""
🛠️ **Tool Execution Status**

📍 **Parameters Extracted:**
- Industry: {params["industry"]}
- Region: {params["region"]}
- Keywords: {params["keywords"]}

🔄 **Executing {tool_name}...**
"""
    history.append(ChatMessage(role="assistant", content=tool_status))
    yield history, ""

    # Execute tool (crawl + search; results are cached inside the instance).
    crawl_results = chat_instance.crawl_regulatory_sites(
        params["industry"], params["region"], params["keywords"]
    )

    # Update the status message with the number of results found.
    history[-1] = ChatMessage(
        role="assistant",
        content=tool_status
        + f"\n\n✅ **Found {crawl_results['total_found']} regulatory updates**",
    )
    yield history, ""

    # Show collapsible raw results (top 5).
    if crawl_results["results"]:
        results_display = []
        for i, result in enumerate(crawl_results["results"][:5], 1):
            results_display.append(f"""
**{i}. {result["source"]}**
- Title: {result["title"][:100]}...
- URL: {result["url"]}
""")
        collapsible_results = f"""
<details>
<summary><strong>📋 Raw Regulatory Data</strong> - Click to expand</summary>

{"".join(results_display)}

</details>
"""
        history.append(ChatMessage(role="assistant", content=collapsible_results))
        yield history, ""

    # Surface related past queries from memory, if any.
    memory_results = chat_instance.search_memory("user", message)
    if memory_results:
        # BUG FIX: this string interpolates len(memory_results), so it must
        # be an f-string (it previously rendered the braces literally).
        memory_msg = f"""
<details>
<summary><strong>💾 Related Past Queries</strong> - Click to expand</summary>

Found {len(memory_results)} similar past queries in memory.

</details>
"""
        history.append(ChatMessage(role="assistant", content=memory_msg))
        yield history, ""

    # Generate final analysis
    history.append(
        ChatMessage(role="assistant", content="📝 **Generating Compliance Report...**")
    )
    yield history, ""

    if not crawl_results["results"]:
        summary_prompt = f"No regulatory updates found for {params['industry']} in {params['region']} with keywords: {params['keywords']}. Provide helpful suggestions on where to look or what to search for."
    else:
        # Group results by source for a structured report.
        by_source = {}
        for result in crawl_results["results"][:8]:
            by_source.setdefault(result.get("source", "Unknown"), []).append(result)

        summary_prompt = f"""
Create a comprehensive regulatory compliance report for {params["industry"]} industry in {params["region"]} region.

Analyze these regulatory updates:
{json.dumps(by_source, indent=2)}

Include:
# 📋 Executive Summary
(2-3 sentences overview)

# 🔍 Key Findings
• Finding 1
• Finding 2
• Finding 3

# ⚠️ Compliance Requirements
- List main requirements with priorities

# ✅ Action Items
- Specific actions with suggested timelines

# 📚 Resources
- Links and references

Use emojis, bullet points, and clear formatting. Keep it professional but readable.
"""

    # Replace the "generating" notice with the streamed report.
    history.pop()
    streaming_content = ""
    history.append(ChatMessage(role="assistant", content=""))
    for chunk in chat_instance.stream_llm(summary_prompt):
        streaming_content += chunk
        history[-1] = ChatMessage(role="assistant", content=streaming_content)
        yield history, ""

    # Persist the exchange for future memory searches (best-effort).
    chat_instance.save_to_memory("user", message, streaming_content)

    # Show completion time
    elapsed = time.time() - start_time
    history.append(
        ChatMessage(
            role="assistant", content=f"✨ **Analysis complete** ({elapsed:.1f}s)"
        )
    )
    yield history, ""
389
+
390
 
391
# Create Gradio interface
with gr.Blocks(
    title="RegRadar - AI Regulatory Compliance Assistant",
    theme=gr.themes.Soft(),
    css="""
    .tool-status {
        background-color: #f0f4f8;
        padding: 10px;
        border-radius: 5px;
        margin: 10px 0;
    }
    """,
) as demo:
    # Header
    gr.HTML("""
    <center>
    <h1 style="text-align: center;">🛡️ RegRadar</h1>
    <p><b>AI-powered regulatory compliance assistant that monitors global regulations</b></p>
    </center>
    """)

    # Main chat interface.
    # NOTE: bubble_full_width was dropped — it is deprecated (a no-op) in
    # Gradio 5, which requirements.txt pins (gradio==5.34.0).
    chatbot = gr.Chatbot(
        height=500,
        type="messages",
        avatar_images=AVATAR_IMAGES,
        show_copy_button=True,
    )

    # Input row: text box plus send/clear buttons.
    with gr.Row(equal_height=True):
        msg = gr.Textbox(
            placeholder="Ask about regulatory updates, compliance requirements, or any industry regulations...",
            show_label=False,
            scale=18,
            autofocus=True,
        )
        submit = gr.Button("Send", variant="primary", scale=1, min_width=60)
        clear = gr.Button("Clear", scale=1, min_width=60)

    # Example queries
    example_queries = [
        "Show me the latest SEC regulations for fintech",
        "What are the new data privacy rules in the EU?",
        "Any updates on ESG compliance for energy companies?",
        "Scan for healthcare regulations in the US",
        "What are the global trends in AI regulation?",
    ]
    gr.Examples(examples=example_queries, inputs=msg, label="Example Queries")

    # Tool information panel
    with gr.Accordion("🛠️ Available Tools", open=False):
        gr.Markdown("""
### RegRadar uses these intelligent tools:

**🔍 Regulatory Web Crawler**
- Crawls official regulatory websites (SEC, FDA, FTC, etc.)
- Searches for recent updates and compliance changes
- Focuses on last 30 days of content

**🌐 Regulatory Search Engine**
- Searches across multiple sources for regulatory updates
- Finds industry-specific compliance information
- Aggregates results from various regulatory bodies

**💾 Memory System**
- Remembers past queries and responses
- Learns from your compliance interests
- Provides context from previous interactions

**🤖 AI Analysis Engine**
- Analyzes and summarizes regulatory findings
- Generates actionable compliance recommendations
- Creates executive summaries and action items
""")

    # Event handlers: both Enter and the Send button stream through
    # streaming_chatbot; Clear resets chat history and the input box.
    submit_event = msg.submit(streaming_chatbot, [msg, chatbot], [chatbot, msg])
    click_event = submit.click(streaming_chatbot, [msg, chatbot], [chatbot, msg])
    clear.click(lambda: ([], ""), outputs=[chatbot, msg])

    # Footer
    gr.HTML("""
    <div style="text-align: center; padding: 20px; color: #666; font-size: 0.9rem;">
    <p>RegRadar monitors regulatory updates from SEC, FDA, FTC, EU Commission, and more.</p>
    <p>All analysis is AI-generated. Always verify with official sources.</p>
    </div>
    """)
480
 
 
481
  if __name__ == "__main__":
482
  demo.launch()
requirements.txt CHANGED
@@ -1,4 +1,6 @@
1
  langgraph==0.4.8
2
  openai==1.88.0
3
  tavily-python==0.7.6
4
- mem0ai==0.1.108
 
 
 
1
  langgraph==0.4.8
2
  openai==1.88.0
3
  tavily-python==0.7.6
4
+ mem0ai==0.1.108
5
+ openai-agents==0.0.19
6
+ gradio==5.34.0