MaxonML committed on
Commit
96fa007
Β·
verified Β·
1 Parent(s): 83a7e50

Upload 2 files

Browse files
Files changed (2) hide show
  1. app3.py +882 -0
  2. requirements.txt +7 -0
app3.py ADDED
@@ -0,0 +1,882 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import tempfile
4
+ import time
5
+
6
+ import gradio as gr
7
+ import numpy as np
8
+ import pandas as pd
9
+ from duckduckgo_search import DDGS
10
+ from google import genai
11
+ from google.genai import types
12
+
13
+ # 🎨 Responsive Glassmorphism CSS
14
+ glassy_css = """
15
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
16
+
17
+ *, *::before, *::after { box-sizing: border-box; }
18
+
19
+ body, html {
20
+ background: linear-gradient(135deg, #0a0f1a 0%, #111827 40%, #1a2332 100%) !important;
21
+ background-attachment: fixed;
22
+ color: #e0e0e0 !important;
23
+ font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
24
+ }
25
+
26
+ .gradio-container {
27
+ background: transparent !important;
28
+ max-width: 1500px !important;
29
+ margin: 0 auto !important;
30
+ padding: 12px !important;
31
+ }
32
+
33
+ /* ===== RESPONSIVE STACKING ===== */
34
+ @media (max-width: 768px) {
35
+ .gradio-container { padding: 6px !important; }
36
+ .main-row { flex-direction: column !important; }
37
+ .main-row > .gr-column { min-width: 100% !important; max-width: 100% !important; }
38
+ .sidebar-col { display: none !important; }
39
+ h1 { font-size: 1.4rem !important; }
40
+ h3 { font-size: 1rem !important; }
41
+ }
42
+ @media (min-width: 769px) and (max-width: 1024px) {
43
+ .main-row { flex-wrap: wrap !important; }
44
+ .main-row > .gr-column { min-width: 48% !important; }
45
+ .sidebar-col { min-width: 100% !important; }
46
+ }
47
+
48
+ /* ===== GLASS PANELS ===== */
49
+ div[class*="panel"] {
50
+ background: rgba(255, 255, 255, 0.03) !important;
51
+ border: 1px solid rgba(255, 255, 255, 0.08) !important;
52
+ backdrop-filter: blur(20px) !important;
53
+ -webkit-backdrop-filter: blur(20px) !important;
54
+ border-radius: 16px !important;
55
+ box-shadow: 0 8px 32px rgba(0, 0, 0, 0.4) !important;
56
+ padding: 16px !important;
57
+ }
58
+
59
+ /* ===== SIDEBAR ===== */
60
+ .sidebar-col { border-right: 1px solid rgba(255,255,255,0.06) !important; }
61
+ .sidebar-col .gr-accordion { margin-bottom: 8px !important; }
62
+
63
+ /* ===== INPUTS ===== */
64
+ textarea, input[type="text"], input[type="password"] {
65
+ background: rgba(0, 0, 0, 0.3) !important;
66
+ border: 1px solid rgba(255, 255, 255, 0.12) !important;
67
+ color: #fff !important;
68
+ border-radius: 10px !important;
69
+ transition: border-color 0.2s ease !important;
70
+ font-family: 'Inter', sans-serif !important;
71
+ }
72
+ textarea:focus, input:focus {
73
+ border-color: rgba(0, 200, 150, 0.5) !important;
74
+ box-shadow: 0 0 12px rgba(0, 200, 150, 0.15) !important;
75
+ }
76
+
77
+ /* ===== PRIMARY BUTTON ===== */
78
+ button.primary {
79
+ background: linear-gradient(135deg, #00c896 0%, #00b4d8 100%) !important;
80
+ border: none !important;
81
+ color: #fff !important;
82
+ font-weight: 600 !important;
83
+ border-radius: 10px !important;
84
+ padding: 10px 20px !important;
85
+ transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important;
86
+ box-shadow: 0 4px 15px rgba(0, 200, 150, 0.3) !important;
87
+ }
88
+ button.primary:hover {
89
+ transform: translateY(-2px) !important;
90
+ box-shadow: 0 6px 20px rgba(0, 200, 150, 0.5) !important;
91
+ }
92
+
93
+ /* ===== SECONDARY BUTTON ===== */
94
+ button.secondary {
95
+ background: rgba(255,255,255,0.06) !important;
96
+ border: 1px solid rgba(255,255,255,0.15) !important;
97
+ color: #c0c0c0 !important;
98
+ border-radius: 8px !important;
99
+ transition: all 0.2s ease !important;
100
+ }
101
+ button.secondary:hover {
102
+ background: rgba(255,255,255,0.12) !important;
103
+ color: #fff !important;
104
+ }
105
+
106
+ /* ===== TYPOGRAPHY ===== */
107
+ h1 {
108
+ color: #ffffff !important;
109
+ font-weight: 700 !important;
110
+ letter-spacing: -0.5px !important;
111
+ background: linear-gradient(135deg, #00c896, #00b4d8) !important;
112
+ -webkit-background-clip: text !important;
113
+ -webkit-text-fill-color: transparent !important;
114
+ background-clip: text !important;
115
+ }
116
+ h2, h3, h4 { color: #e8e8e8 !important; font-weight: 600 !important; }
117
+ p, span, label { color: #c0c0c0 !important; }
118
+
119
+ /* ===== SURVEYED LINKS ===== */
120
+ .surveyed-links a {
121
+ color: #60efff !important;
122
+ text-decoration: underline !important;
123
+ word-break: break-all !important;
124
+ }
125
+ .surveyed-links p { margin-bottom: 8px !important; line-height: 1.6 !important; }
126
+
127
+ /* ===== GALLERY ===== */
128
+ .viz-gallery { min-height: 200px; }
129
+ .viz-gallery .gallery-item img {
130
+ border-radius: 12px !important;
131
+ border: 1px solid rgba(255,255,255,0.08) !important;
132
+ cursor: pointer !important;
133
+ }
134
+
135
+ /* ===== ACCORDION ===== */
136
+ .gr-accordion { border-radius: 12px !important; overflow: hidden !important; }
137
+
138
+ /* ===== SCROLLABLE MARKDOWN ===== */
139
+ .report-body {
140
+ max-height: 70vh;
141
+ overflow-y: auto;
142
+ padding-right: 8px;
143
+ }
144
+ .report-body::-webkit-scrollbar { width: 6px; }
145
+ .report-body::-webkit-scrollbar-thumb {
146
+ background: rgba(255,255,255,0.15);
147
+ border-radius: 3px;
148
+ }
149
+ """
150
+
151
+ # 🎯 Constants
152
# Labels for the two research pipelines; also compared against in
# orchestrate_agents to decide which branch to run.
QUICK_MODE = "Quick Research (Direct)"
DEEP_MODE = "Deep Research & Debate"
# Placeholder shown in the debate panel when Quick mode bypasses the debate.
DEBATE_SKIPPED = "*Debate skipped for Quick mode.*"
# Scratch directory for generated chart PNGs and exported markdown reports.
VIZ_DIR = tempfile.mkdtemp(prefix="research_viz_")

# Candidate Gemini model names in preference order; call_gemini falls back
# through this list when the caller's primary model fails.
GEMINI_MODELS = [
    "gemini-2.5-flash",
    "gemini-flash-latest",
    "gemini-flash-lite-latest",
    "gemini-2.5-flash-lite",
    "gemini-2.0-flash",
]
164
+
165
+ # πŸ› οΈ Core Functions
166
+
167
+
168
def make_safe(text):
    """
    Reduce *text* to plain ASCII.

    Every non-ASCII character (emoji included) is silently dropped, which
    guarantees the underlying network libraries on Windows can never hit
    a 'UnicodeEncodeError' while sending the payload.  Falsy inputs
    (None, "") come back as the empty string.
    """
    if not text:
        return ""
    ascii_only = str(text).encode("ascii", "ignore")
    return ascii_only.decode("ascii")
177
+
178
+
179
def search_web(
    api_key, query, time_limit, primary_model=GEMINI_MODELS[0], max_results=3
):
    """
    Hybrid Grounding Engine: tries native Google AI Search grounding first,
    then falls back to DuckDuckGo text search.

    Returns a ``(research_text, links_markdown)`` tuple.  On total failure
    the first element is ``""`` and the second carries a warning message.
    """

    # Clean the query so we don't crash building the prompt
    safe_query = make_safe(query)

    # 1. ATTEMPT NATIVE GOOGLE AI SEARCH GROUNDING
    try:
        client = genai.Client(api_key=api_key)
        # Only constrain recency when the user picked something other than
        # the default "All time" filter.
        time_context = (
            f" Focus specifically on recent information from the {time_limit.lower()}."
            if time_limit != "All time"
            else ""
        )
        prompt = f"Conduct detailed, objective research on the following query: '{safe_query}'.{time_context} Provide comprehensive facts and statistics."

        # Strip the prompt of emojis just to be absolutely safe
        safe_prompt = make_safe(prompt)

        config = types.GenerateContentConfig(
            tools=[{"google_search": {}}], temperature=0.2
        )

        response = client.models.generate_content(
            model=primary_model, contents=safe_prompt, config=config
        )

        # Harvest source links out of the grounding metadata, if any.
        urls = []
        if response.candidates and response.candidates[0].grounding_metadata:
            gm = response.candidates[0].grounding_metadata
            chunks = getattr(gm, "grounding_chunks", [])
            for chunk in chunks:
                web = getattr(chunk, "web", None)
                if web:
                    uri = getattr(web, "uri", None)
                    title = getattr(web, "title", "Source")
                    if uri:
                        urls.append(f"πŸ”— **[{title}]({uri})**\n> {uri}")

        # dict.fromkeys de-duplicates while preserving first-seen order.
        unique_urls = list(dict.fromkeys(urls))
        if unique_urls:
            # Make sure the returned text from the API doesn't contain weird
            # characters that might crash the next step
            return make_safe(response.text), "\n\n".join(unique_urls)
        # No grounded links: fall through to the DuckDuckGo path below.

    except Exception as e:
        print(f"Native Grounding Info (Falling back to DDG): {e}")

    # 2. FALLBACK TO DUCKDUCKGO SCRAPING
    try:
        ddgs = DDGS()
        # Map UI labels onto the ddgs `timelimit` codes.
        timelimit_map = {
            "Today": "d",
            "Past week": "w",
            "Past month": "m",
            "Past year": "y",
            "All time": None,
        }
        t = timelimit_map.get(time_limit)
        results = list(ddgs.text(safe_query, timelimit=t, max_results=max_results))

        extracted = []
        urls = []
        for r in results:
            title = make_safe(r.get("title", "Untitled"))
            href = r.get("href", "")
            body = make_safe(r.get("body", ""))

            # Keep only results with a usable absolute URL.
            if href and href.startswith("http"):
                urls.append(f"πŸ”— **[{title}]({href})**\n> {href}")
                extracted.append(f"Title: {title}\nLink: {href}\nSnippet: {body}")

        url_text = "\n\n".join(urls) if urls else ""
        data_text = "\n\n".join(extracted) if extracted else ""
        return data_text, url_text
    except Exception as e:
        return "", f"⚠️ Search error: {e}"
257
+
258
+
259
def call_gemini(api_key, prompt, primary_model=GEMINI_MODELS[0], retries=2):
    """
    Run a prompt against Gemini with retry and model fallback.

    The preferred model is tried first with up to *retries* attempts
    (backing off 2s, 4s, ...); on quota/429 errors the model is abandoned
    immediately and the next model in GEMINI_MODELS is tried.  Returns
    the response text, or an error string if every model fails.
    """
    client = genai.Client(api_key=api_key)
    # Primary model first, then every other known model as a fallback.
    fallback_chain = [primary_model]
    fallback_chain += [m for m in GEMINI_MODELS if m != primary_model]

    # Outbound text must be ASCII-only to avoid Windows httpx encoding crashes.
    safe_prompt = make_safe(prompt)

    last_error = None
    for model in fallback_chain:
        attempt = 0
        while attempt < retries:
            try:
                reply = client.models.generate_content(
                    model=model, contents=safe_prompt
                )
                # Don't strip the output, Gradio needs to show it.  Only the
                # OUTBOUND request causes crashes.
                return reply.text
            except Exception as exc:
                last_error = str(exc)
                # Quota exhaustion: retrying the same model is pointless.
                if "429" in last_error or "quota" in last_error.lower():
                    break
                attempt += 1
                if attempt >= retries:
                    break
                time.sleep(2 * attempt)
    return f"⚠️ Error connecting to Gemini API. Details: {last_error}"
284
+
285
+
286
def execute_chart_code(code_str, output_filename="chart.png"):
    """
    Execute an LLM-generated matplotlib script and return the chart path.

    The script is extracted from a fenced code block, its savefig target is
    rewritten to *output_filename*, and it runs under the headless Agg
    backend.  Returns *output_filename* if the file was produced, else None.

    Fixes over the previous version: accepts a bare ``` fence (models often
    omit the language tag), logs failures instead of swallowing them
    silently, and closes all figures afterwards so repeated calls do not
    leak matplotlib state/memory.
    """
    # Pull the code out of a fenced block; fall back to the raw string.
    match = re.search(r"```(?:python)?\s*(.*?)```", code_str, re.DOTALL)
    if match:
        code_str = match.group(1).strip()
    # Force every savefig target to our controlled output path.
    code_str = re.sub(
        r"plt\.savefig\(['\"].*?['\"]", f"plt.savefig('{output_filename}'", code_str
    )
    # The headless backend must be selected before pyplot is imported.
    safe_code = (
        "import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n"
        + code_str
    )
    # SECURITY NOTE: exec() runs model-generated code with full privileges.
    # Acceptable only because the code comes from our own prompts, never
    # directly from end users.
    namespace = {"pd": pd, "np": np}
    try:
        exec(safe_code, namespace)
        if os.path.exists(output_filename):
            return output_filename
    except Exception as e:
        # Best-effort: a failed chart must never abort the research run,
        # but leave a trace in the server log for debugging.
        print(f"Chart execution failed: {e}")
    finally:
        # Close any figures the generated script left open.
        plt_mod = namespace.get("plt")
        if plt_mod is not None:
            try:
                plt_mod.close("all")
            except Exception:
                pass
    return None
305
+
306
+
307
def generate_visualizations(
    api_key, topic, research_data, num_charts=1, primary_model=GEMINI_MODELS[0]
):
    """
    Render up to three research charts via LLM-written matplotlib scripts.

    Each chart slot has a fixed visualization style (statistical chart,
    comparison table, flow diagram).  Returns the list of PNG paths that
    were actually produced; failed charts are simply omitted.
    """
    chart_types = [
        ("statistical chart (bar, pie, line, or scatter)", "viz_chart"),
        ("comparison table as an image using matplotlib", "viz_table"),
        ("flowchart or process diagram using matplotlib", "viz_flow"),
    ]
    chart_paths = []
    for idx in range(min(num_charts, 3)):
        chart_desc, prefix = chart_types[idx]
        # Timestamped filename inside the session scratch dir.
        out_path = os.path.join(VIZ_DIR, f"{prefix}_{int(time.time())}_{idx}.png")
        chart_prompt = f"""Write a Python script using matplotlib to create a {chart_desc} based on: '{topic}'.
Research context: {research_data[:1500]}
1. Import matplotlib.pyplot as plt
2. Apply a dark theme using plt.style.use('dark_background')
3. MUST save the figure as '{out_path}' using plt.savefig('{out_path}', bbox_inches='tight', dpi=150)
4. Output ONLY valid python code inside ```python ``` blocks."""
        generated_code = call_gemini(
            api_key, chart_prompt, primary_model=primary_model
        )
        saved = execute_chart_code(generated_code, output_filename=out_path)
        if saved:
            chart_paths.append(saved)
    return chart_paths
330
+
331
+
332
def generate_custom_viz(api_key, viz_prompt, primary_model=GEMINI_MODELS[0]):
    """
    Render one ad-hoc chart described by the sidebar prompt.

    Returns a single-element list of the saved PNG path (ready for a
    gr.Gallery), or an empty list when inputs are missing or rendering
    fails.
    """
    # Nothing to do without both a key and a description.
    if not api_key or not viz_prompt:
        return []

    out_path = os.path.join(VIZ_DIR, f"custom_{int(time.time())}.png")
    chart_prompt = f"""Write a Python script using matplotlib to create a visualization for: '{viz_prompt}'.
1. Import matplotlib.pyplot as plt
2. Apply a dark theme using plt.style.use('dark_background')
3. Make it visually clear and professional.
4. MUST save the figure as '{out_path}' using plt.savefig('{out_path}', bbox_inches='tight', dpi=150)
5. Output ONLY valid python code inside ```python ``` blocks. No explanations."""

    generated = call_gemini(api_key, chart_prompt, primary_model=primary_model)
    saved_path = execute_chart_code(generated, output_filename=out_path)
    return [saved_path] if saved_path else []
350
+
351
+
352
def export_report(final_text, surveyed_urls, debate_text):
    """
    Write the current session's results to a markdown file for download.

    Returns the file path, or None when the report panel still shows its
    placeholder (nothing has been generated yet).
    """
    # The placeholder starts with "*The final" — nothing to export yet.
    if not final_text or final_text.startswith("*The final"):
        return None
    report = (
        "# Research Report\n\n"
        f"## Final Intelligence Report\n\n{final_text}\n\n\n\n"
        f"## Surveyed Resources\n\n{surveyed_urls}\n\n\n\n"
        f"## Debate Transcript\n\n{debate_text}\n"
    )
    out_path = os.path.join(VIZ_DIR, f"report_{int(time.time())}.md")
    with open(out_path, "w", encoding="utf-8") as f:
        f.write(report)
    return out_path
360
+
361
+
362
def clear_outputs():
    """
    Reset every output widget to its initial placeholder state.

    Returned tuple maps onto: progress log, surveyed URLs, debate
    transcript, final report, gallery images, export file.
    """
    placeholders = (
        "",
        "*Web URLs will appear here...*",
        "*Debate transcript will stream here...*",
        "*The final synthesis will appear here...*",
        [],
        None,
    )
    return placeholders
371
+
372
+
373
+ # 🧠 Multi-Agent Orchestration Workflow
374
+
375
+
376
def orchestrate_agents(
    topic, mode, time_limit, num_viz, api_key, primary_model, history
):
    """
    Generator driving the whole research pipeline, streaming UI updates.

    Every ``yield`` is an 8-tuple matching the outputs wired to
    ``submit_btn.click``: (progress log, surveyed-URLs markdown, debate
    markdown, final report markdown, gallery image paths, history state,
    history-dropdown update, status-bar text).  ``gr.update()`` with no
    arguments leaves the history dropdown untouched until the very last
    yield, which refreshes its choices.

    Parameters
    ----------
    topic : str            research question typed by the user
    mode : str             "Auto", QUICK_MODE or DEEP_MODE
    time_limit : str       recency filter label (e.g. "Past week")
    num_viz : int          number of charts to auto-generate (0-3)
    api_key : str          Gemini API key from the sidebar
    primary_model : str    preferred Gemini model name
    history : list         session history; appended to in place
    """
    # Guard clauses: surface configuration errors in the log panel and stop.
    if not api_key:
        yield (
            "❌ Error: Please provide a Gemini API Key in the sidebar.",
            "No sites",
            "No debate",
            "Error",
            [],
            history,
            gr.update(),
            "Error",
        )
        return
    if not topic.strip():
        yield (
            "❌ Error: Please enter a research topic.",
            "",
            "",
            "",
            [],
            history,
            gr.update(),
            "Error",
        )
        return

    log, live_debate = [], ""

    def update_log(msg):
        # Append a checkmarked step and return the full log for display.
        log.append(f"βœ… {msg}")
        return "\n".join(log)

    # 1. Determine Routing: let the model pick Quick vs Deep when on "Auto".
    actual_mode = mode
    if mode == "Auto":
        yield (
            update_log("Auto-Routing: Deciding research depth..."),
            "",
            "",
            "Analyzing topic complexity...",
            [],
            history,
            gr.update(),
            "πŸ”„ Routing...",
        )
        decision = (
            call_gemini(
                api_key,
                f"Analyze: '{topic}'. Quick factual question or complex deep research? Reply 'Quick' or 'Deep'.",
                primary_model=primary_model,
            )
            .strip()
            .lower()
        )
        # Any reply not containing "quick" (including errors) routes to Deep.
        actual_mode = QUICK_MODE if "quick" in decision else DEEP_MODE
        yield (
            update_log(f"Auto-Routing decided: {actual_mode}"),
            "",
            "",
            "Routing chosen...",
            [],
            history,
            gr.update(),
            f"Mode: {actual_mode}",
        )

    # 2. Web Grounding Generation: derive search queries, then gather data.
    yield (
        update_log("Agents brainstorming search strategies..."),
        "πŸ’‘ Generating queries...",
        "",
        "Optimizing intents...",
        [],
        history,
        gr.update(),
        "🧠 Thinking...",
    )
    queries_raw = (
        call_gemini(
            api_key,
            f"Topic: '{topic}'. Generate exactly 2 highly effective search queries. Return ONLY queries, one per line.",
            primary_model=primary_model,
        )
        .strip()
        .split("\n")
    )
    # Trim list markers/quotes, drop error lines, cap at 2; fall back to
    # the raw topic when the model produced nothing usable.
    search_queries = [
        q.strip(' "-*') for q in queries_raw if q.strip() and "Error" not in q
    ][:2] or [topic]

    yield (
        update_log("Triggering Google AI Search Grounding..."),
        "πŸ”Ž Extracting context...",
        "",
        "Gathering grounded data...",
        [],
        history,
        gr.update(),
        "🌐 Grounding...",
    )

    all_broad_data, all_surveyed_urls = "", ""
    for q in search_queries:
        b_data, s_urls = search_web(
            api_key, q, time_limit, primary_model, max_results=3
        )
        if b_data:
            all_broad_data += f"\n\nSource [{q}]:\n" + b_data
        # search_web signals failure with a warning-emoji message; skip those.
        if s_urls and "⚠️" not in s_urls:
            all_surveyed_urls += s_urls + "\n\n"

    all_surveyed_urls = all_surveyed_urls.strip() or "⚠️ No valid links retrieved."
    yield (
        update_log("Grounding complete."),
        all_surveyed_urls,
        "",
        "Synthesizing...",
        [],
        history,
        gr.update(),
        "πŸ“Š Analyzing...",
    )

    gallery_images, final_answer = [], ""

    # 3. Execution: single-shot answer (Quick) or multi-agent debate (Deep).
    if actual_mode == QUICK_MODE:
        yield (
            update_log("Executing Quick Direct Answer..."),
            all_surveyed_urls,
            DEBATE_SKIPPED,
            "Drafting final answer...",
            [],
            history,
            gr.update(),
            "✍️ Writing...",
        )
        prompt = f"You are a pragmatic expert. Based on this grounded data: {all_broad_data}. Answer: '{topic}'. Tone: Layman, simple. Provide verified resources."
        final_answer = call_gemini(api_key, prompt, primary_model=primary_model)
    else:
        # Deep path: research agent 1 extracts facts from the broad data.
        yield (
            update_log("Deep Research: Agent 1 analyzing..."),
            all_surveyed_urls,
            live_debate,
            "Analyzing...",
            [],
            history,
            gr.update(),
            "πŸ”¬ Agent 1...",
        )
        ra1_findings = call_gemini(
            api_key,
            f"Analyze raw data for '{topic}': {all_broad_data}. Extract core facts.",
            primary_model=primary_model,
        )

        # Agent 2 runs a second, critical search and cross-references.
        yield (
            update_log("Deep Research: Agent 2 cross-referencing..."),
            all_surveyed_urls,
            live_debate,
            "Cross-referencing...",
            [],
            history,
            gr.update(),
            "πŸ” Agent 2...",
        )
        deep_data, deep_urls = search_web(
            api_key,
            f"{topic} critical analysis",
            time_limit,
            primary_model,
            max_results=2,
        )
        if deep_urls and "⚠️" not in deep_urls:
            all_surveyed_urls += "\n\n\n\n**Deep Search Results:**\n\n" + deep_urls
        master_research = call_gemini(
            api_key,
            f"Review Agent 1: {ra1_findings}. Cross-reference with: {deep_data}. Output verified master summary.",
            primary_model=primary_model,
        )

        # Two-round debate: AI 1 proposes/refines, AI 2 critiques/checks.
        tone = "Tone: Use simple, layman terms. Be rational and constructive."
        yield (
            update_log("Debate Round 1..."),
            all_surveyed_urls,
            live_debate,
            "Debating...",
            [],
            history,
            gr.update(),
            "βš–οΈ Debate R1...",
        )
        da1_r1 = call_gemini(
            api_key,
            f"Debate AI 1: Propose an answer to '{topic}' using: {master_research}. Under 100 words. {tone}",
            primary_model=primary_model,
        )
        live_debate += f"**πŸ€– AI 1 (Proposal):**\n{da1_r1}\n\n"
        da2_r1 = call_gemini(
            api_key,
            f"Debate AI 2: Review AI 1's draft: {da1_r1}. Point out missing context. Under 100 words. {tone}",
            primary_model=primary_model,
        )
        live_debate += f"**🧐 AI 2 (Critique):**\n{da2_r1}\n\n"

        yield (
            update_log("Debate Round 2..."),
            all_surveyed_urls,
            live_debate,
            "Debating...",
            [],
            history,
            gr.update(),
            "βš–οΈ Debate R2...",
        )
        da1_r2 = call_gemini(
            api_key,
            f"Debate AI 1: Refine based on AI 2's review: {da2_r1}. Under 100 words. {tone}",
            primary_model=primary_model,
        )
        live_debate += f"**πŸ€– AI 1 (Refinement):**\n{da1_r2}\n\n"
        da2_r2 = call_gemini(
            api_key,
            f"Debate AI 2: Final check on AI 1's revision: {da1_r2}. Under 100 words. {tone}",
            primary_model=primary_model,
        )
        live_debate += f"**🧐 AI 2 (Final Check):**\n{da2_r2}\n\n"

        # Final synthesis from the last debate round.
        yield (
            update_log("Master Orchestrator drafting output..."),
            all_surveyed_urls,
            live_debate,
            "Drafting Final Report...",
            [],
            history,
            gr.update(),
            "πŸ“ Synthesizing...",
        )
        final_prompt = f"""You are the Final Orchestrator. Review this debate for topic '{topic}':
AI 1: {da1_r2}
AI 2: {da2_r2}

Create the final intelligence report.
RULES:
1. Tone: Simple, layman-friendly. Use examples and analogies.
2. Formatting: Beautiful Markdown (headers, bullet points, tables if applicable).
3. End with '### πŸ“š Verified Resources' with clickable markdown links."""
        final_answer = call_gemini(api_key, final_prompt, primary_model=primary_model)

    debate_display = live_debate if actual_mode != QUICK_MODE else DEBATE_SKIPPED
    yield (
        update_log("Final text generated."),
        all_surveyed_urls,
        debate_display,
        final_answer,
        [],
        history,
        gr.update(),
        "βœ… Report ready",
    )

    # 4. Visualizations (optional, slider-controlled).
    if num_viz > 0:
        yield (
            update_log(f"Generating {num_viz} visualization(s)..."),
            all_surveyed_urls,
            debate_display,
            final_answer,
            [],
            history,
            gr.update(),
            "πŸ“Š Generating charts...",
        )
        gallery_images = generate_visualizations(
            api_key,
            topic,
            all_broad_data,
            num_charts=num_viz,
            primary_model=primary_model,
        )
        yield (
            update_log(f"{len(gallery_images)} visualization(s) generated!"),
            all_surveyed_urls,
            debate_display,
            final_answer,
            gallery_images,
            history,
            gr.update(),
            "βœ… Charts ready",
        )

    # 5. Complete
    yield (
        update_log("All Operations Completed Successfully!"),
        all_surveyed_urls,
        debate_display,
        final_answer,
        gallery_images,
        history,
        gr.update(),
        "βœ… Done!",
    )

    # Persist the run and refresh the history dropdown choices.
    history.append(
        {
            "topic": topic,
            "log": "\n".join(log),
            "urls": all_surveyed_urls,
            "debate": debate_display,
            "final": final_answer,
            "charts": gallery_images,
        }
    )
    yield (
        "\n".join(log),
        all_surveyed_urls,
        debate_display,
        final_answer,
        gallery_images,
        history,
        gr.update(choices=[h["topic"] for h in history]),
        "βœ… Done!",
    )
701
+
702
+
703
def load_from_history(selected_topic, history):
    """
    Restore the stored outputs of a previously-run topic.

    Returns (log, urls, debate, final report, chart paths) for the first
    history entry whose topic matches, or empty placeholders when the
    topic is not found.
    """
    entry = next(
        (item for item in history if item["topic"] == selected_topic), None
    )
    if entry is None:
        return "", "", "", "No history found.", []
    # Older entries may predate chart storage, hence the .get default.
    return (
        entry["log"],
        entry["urls"],
        entry["debate"],
        entry["final"],
        entry.get("charts", []),
    )
714
+
715
+
716
+ # πŸ–₯️ Responsive Dashboard UI
717
with gr.Blocks(
    # FIX: `theme` and `css` are gr.Blocks() constructor parameters.
    # They were previously passed to app.launch(), which accepts neither
    # keyword — that raised a TypeError at startup and meant glassy_css
    # was never applied.
    title="AI Research Hub",
    theme=gr.themes.Soft(),
    css=glassy_css,
) as app:
    # Per-session list of completed runs; written by orchestrate_agents,
    # read back by load_from_history.
    history_state = gr.State([])

    gr.Markdown("# πŸ” Multi-Agent Research Hub")
    gr.Markdown(
        "*Native Google AI Grounding Β· Auto-Routing Β· Live Debates Β· Multi-Viz Analytics*"
    )

    with gr.Row(elem_classes=["main-row"]):
        # ----- Sidebar: API key, exports, custom charts, history -----
        with gr.Column(scale=1, min_width=220, elem_classes=["sidebar-col"]):
            gr.Markdown("### 🧭 Sidebar")
            with gr.Accordion("πŸ”‘ API Key", open=True):
                api_key = gr.Textbox(
                    label="Gemini API Key",
                    type="password",
                    placeholder="AIzaSy...",
                    show_label=False,
                )
            with gr.Accordion("πŸ“‹ Quick Actions", open=True):
                export_btn = gr.Button(
                    "πŸ“₯ Export Report", variant="secondary", size="sm"
                )
                export_file = gr.File(label="Download", visible=True, interactive=False)
                clear_btn = gr.Button("πŸ—‘οΈ Clear Outputs", variant="secondary", size="sm")

            with gr.Accordion("🎨 Custom Visualization", open=False):
                custom_viz_prompt = gr.Textbox(
                    label="Describe your chart",
                    placeholder="e.g. Pie chart of global energy sources",
                    lines=2,
                )
                custom_viz_btn = gr.Button("πŸ“Š Generate", variant="primary", size="sm")
                custom_viz_gallery = gr.Gallery(
                    label="Custom Charts",
                    columns=1,
                    height=200,
                    object_fit="contain",
                    interactive=False,
                )

            with gr.Accordion("πŸ•°οΈ History", open=False):
                history_dropdown = gr.Dropdown(label="Past Queries", choices=[])
                load_history_btn = gr.Button("πŸ“‚ Load", variant="secondary", size="sm")

        # ----- Main column: research controls + status -----
        with gr.Column(scale=5, min_width=400):
            with gr.Row():
                topic = gr.Textbox(
                    label="πŸ” Research Topic",
                    placeholder="Enter any topic to research...",
                    lines=2,
                    scale=3,
                )
                with gr.Column(scale=1, min_width=180):
                    model_select = gr.Dropdown(
                        choices=GEMINI_MODELS,
                        value=GEMINI_MODELS[0],
                        label="πŸ€– Primary Model",
                    )
                    mode = gr.Radio(
                        ["Auto", QUICK_MODE, DEEP_MODE], value="Auto", label="🧠 Mode"
                    )

            with gr.Row():
                time_limit = gr.Dropdown(
                    ["All time", "Past year", "Past month", "Past week", "Today"],
                    value="All time",
                    label="πŸ“… Time Cutoff",
                    scale=1,
                )
                num_viz = gr.Slider(
                    minimum=0,
                    maximum=3,
                    step=1,
                    value=1,
                    label="πŸ“Š Visualizations",
                    scale=1,
                )
                submit_btn = gr.Button(
                    "πŸš€ Start Research", variant="primary", size="lg", scale=1
                )

            # One-line status strip fed by orchestrate_agents' last tuple slot.
            status_bar = gr.Textbox(
                show_label=False,
                interactive=False,
                lines=1,
                placeholder="Ready to research...",
            )

    # ----- Results area: logs + grounded links side by side -----
    with gr.Row(elem_classes=["main-row"]):
        with gr.Column(scale=1, min_width=280):
            with gr.Accordion("πŸ€– Workflow Logs", open=True):
                progress_box = gr.Textbox(
                    show_label=False, lines=8, interactive=False
                )
        with gr.Column(scale=1, min_width=280):
            with gr.Accordion("🌐 Grounded Resources", open=True):
                surveyed_sites = gr.Markdown(
                    "*Web URLs will appear here...*",
                    elem_classes=["surveyed-links"],
                )

    with gr.Accordion("βš–οΈ Live AI Debate", open=False):
        live_debate = gr.Markdown("*Debate transcript will stream here...*")

    gr.Markdown("")
    gr.Markdown("### πŸ“‘ Final Intelligence Report")
    final_output = gr.Markdown(
        "*The final synthesis will appear here...*",
        elem_classes=["report-body"],
    )

    gr.Markdown("")
    gr.Markdown("### πŸ“Š Data Visualizations")
    viz_gallery = gr.Gallery(
        label="Generated Visualizations",
        columns=3,
        height=350,
        object_fit="contain",
        interactive=False,
        elem_classes=["viz-gallery"],
    )

    # ----- Event wiring -----
    # Streaming generator: each yield updates all eight outputs in order.
    submit_btn.click(
        orchestrate_agents,
        inputs=[topic, mode, time_limit, num_viz, api_key, model_select, history_state],
        outputs=[
            progress_box,
            surveyed_sites,
            live_debate,
            final_output,
            viz_gallery,
            history_state,
            history_dropdown,
            status_bar,
        ],
    )
    load_history_btn.click(
        load_from_history,
        inputs=[history_dropdown, history_state],
        outputs=[progress_box, surveyed_sites, live_debate, final_output, viz_gallery],
    )
    export_btn.click(
        export_report,
        inputs=[final_output, surveyed_sites, live_debate],
        outputs=[export_file],
    )
    clear_btn.click(
        clear_outputs,
        outputs=[
            progress_box,
            surveyed_sites,
            live_debate,
            final_output,
            viz_gallery,
            export_file,
        ],
    )

    custom_viz_btn.click(
        generate_custom_viz,
        inputs=[api_key, custom_viz_prompt, model_select],
        outputs=[custom_viz_gallery],
    )

if __name__ == "__main__":
    # theme/css are configured on the Blocks constructor above.
    app.launch()
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ gradio
2
+ google-genai
3
+ duckduckgo_search
4
+ matplotlib
5
+ pandas
6
+ pillow
7
+ numpy