Leonardo committed on
Commit
b215c98
·
verified ·
1 Parent(s): e9c8f97

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +386 -693
app.py CHANGED
@@ -1,17 +1,9 @@
1
- """Main application for the OpenDeepResearch Gradio interface."""
2
-
3
- import sys
4
  import mimetypes
5
- import traceback
6
- from dataclasses import dataclass
7
  import os
8
  import re
9
  import shutil
10
- import time
11
- from typing import Optional, Dict, Any
12
- from datetime import datetime
13
 
14
- from cleantext import clean
15
  from dotenv import load_dotenv
16
  from huggingface_hub import login
17
  import gradio as gr
@@ -27,7 +19,6 @@ from scripts.text_web_browser import (
27
  VisitTool,
28
  )
29
  from scripts.visual_qa import visualizer
30
- from scripts.text_cleaner_tool import TextCleanerTool
31
 
32
  from smolagents import (
33
  CodeAgent,
@@ -38,11 +29,11 @@ from smolagents import (
38
  GoogleSearchTool,
39
  Tool,
40
  )
41
- from smolagents.agent_types import AgentText # AgentImage, AgentAudio
42
  from smolagents.gradio_ui import pull_messages_from_step, handle_agent_output_types
43
 
44
-
45
- # Constants and configurations - Converted to UPPER_CASE
46
  AUTHORIZED_IMPORTS = [
47
  "requests", # Web requests (fetching data from the internet)
48
  "zipfile", # Working with ZIP archives
@@ -90,7 +81,6 @@ AUTHORIZED_IMPORTS = [
90
  "schedule", # Allow the agent to schedule tasks
91
  ]
92
 
93
-
94
  USER_AGENT = (
95
  "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
96
  "(KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0"
@@ -124,89 +114,63 @@ ALLOWED_FILE_TYPES = [
124
  "audio/ogg",
125
  ]
126
 
127
- # Maximum chat history length to prevent memory issues
128
- MAX_CHAT_HISTORY = 100
129
- # Maximum uploaded file size in MB
130
- MAX_FILE_SIZE_MB = 50
131
- # File cleanup schedule (in days)
132
- FILE_RETENTION_DAYS = 7
133
-
134
 
135
  def setup_environment():
136
- """
137
- Initialize environment variables and authentication.
138
- Returns:
139
- bool: True if setup was successful, False otherwise
140
- """
141
  load_dotenv(override=True)
142
- hf_token = os.getenv("HF_TOKEN")
143
- if hf_token: # check if token is actually set
144
- try:
145
- login(hf_token)
146
- print("HF_TOKEN (last 10 characters):", hf_token[-10:])
147
- return True
148
- except (ValueError, ConnectionError) as e: # More specific exceptions
149
- print(f"Failed to login with HF token: {e}")
150
- return False
151
  else:
152
  print("HF_TOKEN not found in environment variables.")
153
- return False
154
 
155
 
 
156
  class ModelManager:
157
  """Manages model loading and initialization."""
158
 
159
  @staticmethod
160
  def load_model(chosen_inference: str, model_id: str, key_manager=None):
161
- """
162
- Load the specified model with appropriate configuration.
163
- Args:
164
- chosen_inference: Type of inference to use
165
- model_id: ID of the model to load
166
- key_manager: Optional key manager for API keys
167
- Returns:
168
- Model instance
169
- Raises:
170
- ValueError: If inference type is invalid or required parameters missing
171
- RuntimeError: If model loading fails
172
- """
173
- if chosen_inference == "hf_api":
174
- return HfApiModel(model_id=model_id)
175
- if chosen_inference == "hf_api_provider":
176
- return HfApiModel(provider="together")
177
- if chosen_inference == "litellm":
178
- return LiteLLMModel(model_id=model_id)
179
- if chosen_inference == "openai":
180
- if not key_manager:
181
- raise ValueError("Key manager required for OpenAI model")
182
- return OpenAIServerModel(
183
- model_id=model_id, api_key=key_manager.get_key("openai_api_key")
184
- )
185
- if chosen_inference == "transformers":
186
- return TransformersModel(
187
- model_id="huggingfacetb/smollm2-1.7b-instruct",
188
- device_map="auto",
189
- max_new_tokens=1000,
190
- )
191
- raise ValueError(f"Invalid inference type: {chosen_inference}")
 
192
 
193
 
194
- # This class only has one public method, but that's acceptable for a registry class
195
- # whose purpose is to provide factory methods
196
  class ToolRegistry:
197
  """Manages tool initialization and organization."""
198
 
199
  @staticmethod
200
  def load_web_tools(model, browser, text_limit=20000):
201
- """
202
- Initialize and return web-related tools.
203
- Args:
204
- model: LLM model for text inspector
205
- browser: Browser instance for web tools
206
- text_limit: Maximum text length for processing
207
- Returns:
208
- List of web tools
209
- """
210
  return [
211
  GoogleSearchTool(provider="serper"),
212
  VisitTool(browser),
@@ -220,710 +184,439 @@ class ToolRegistry:
220
 
221
  @staticmethod
222
  def load_image_generation_tools():
223
- """
224
- Initialize and return image generation tools.
225
- Returns:
226
- Image generation tool
227
- Raises:
228
- RuntimeError: If tool initialization fails
229
- """
230
  try:
231
  return Tool.from_space(
232
- space_id="xkerser/flux.1-dev",
233
  name="image_generator",
234
- description=(
235
- "Generates high-quality AgentImage. "
236
- "With text prompt (77 token limit)."
237
- ),
238
  )
239
- except (
240
- ConnectionError,
241
- ValueError,
242
- RuntimeError,
243
- ) as e: # More specific exceptions
244
- print(f" Couldn't initialize image generation tool: {e}")
245
- raise RuntimeError(f"Image generation tool initialization failed: {e}")
246
-
247
- @staticmethod
248
- def load_clean_text_tool():
249
- """
250
- Initialize and return text cleaning tool.
251
- Returns:
252
- Text cleaning tool
253
- Raises:
254
- RuntimeError: If tool initialization fails
255
- """
256
- try:
257
- return TextCleanerTool()
258
- except (ValueError, RuntimeError) as e: # More specific exceptions
259
- print(f" Couldn't initialize clean text tool: {e}")
260
- raise RuntimeError(f"Clean text tool initialization failed: {e}")
261
 
262
 
 
263
  def create_agent():
264
- """
265
- Creates a fresh agent instance with properly configured tools.
266
- Returns:
267
- CodeAgent: Configured agent ready for use
268
- Raises:
269
- ValueError: If tool validation fails
270
- RuntimeError: If agent creation fails
271
- """
272
- try:
273
- # Initialize model
274
- model = LiteLLMModel(
275
- custom_role_conversions=CUSTOM_ROLE_CONVERSIONS,
276
- model_id="openrouter/deepseek/deepseek-chat-v3-0324:free",
277
- )
278
-
279
- # Initialize tools
280
- text_limit = 30000
281
- browser = SimpleTextBrowser(**BROWSER_CONFIG)
282
-
283
- # Collect all tools in a single list
284
- web_tools = ToolRegistry.load_web_tools(model, browser, text_limit)
285
- image_generator = ToolRegistry.load_image_generation_tools()
286
- clean_text = TextCleanerTool()
287
-
288
- # Combine all tools into a single list
289
- all_tools = [visualizer] + web_tools + [image_generator, clean_text]
290
-
291
- # Validate tools before creating agent
292
- for tool in all_tools:
293
- if not isinstance(tool, Tool):
294
- raise ValueError(
295
- f"Invalid tool type: {type(tool)}. "
296
- f"All tools must be instances of Tool class."
297
- )
298
 
299
- return CodeAgent(
300
- model=model,
301
- tools=all_tools,
302
- max_steps=12,
303
- verbosity_level=2,
304
- additional_authorized_imports=AUTHORIZED_IMPORTS,
305
- planning_interval=4,
306
- )
307
- except (ValueError, RuntimeError) as e:
308
- print(f"Failed to create agent: {e}")
309
- raise RuntimeError(f"Agent creation failed: {e}")
310
-
311
-
312
- # Define standalone functions outside of classes
313
- def process_message_content(content_lower: str) -> Dict[str, bool]:
314
- """
315
- Process message content to determine message type.
316
- Args:
317
- content_lower: Lowercase message content
318
- Returns:
319
- Dictionary with message type flags
320
- """
321
- return {
322
- "is_document_analysis": "document analysis" in content_lower,
323
- "is_search": "search" in content_lower,
324
- "is_error": "error" in content_lower,
325
- }
326
 
327
 
328
  def stream_to_gradio(
329
  agent,
330
  task: str,
331
  reset_agent_memory: bool = False,
332
- additional_args: Optional[Dict] = None,
333
  ):
334
- """
335
- Streams agent responses with improved status indicators.
336
- Args:
337
- agent: The agent instance to use
338
- task: The task to perform
339
- reset_agent_memory: Whether to reset agent memory
340
- additional_args: Optional additional arguments
341
- Yields:
342
- Gradio ChatMessage objects
343
- """
344
- try:
345
- # Initial processing indicator
346
- yield gr.ChatMessage(role="assistant", content="⏳ Processing your request...")
347
-
348
- # Track what we've yielded to replace the processing indicator
349
- first_message_yielded = False
350
-
351
- # Store the step_log outside the loop to avoid the undefined-loop-variable issue
352
- steps = list(
353
- agent.run(
354
- task,
355
- stream=True,
356
- reset=reset_agent_memory,
357
- additional_args=additional_args,
358
- )
359
- )
360
-
361
- # If no steps were returned, handle it gracefully
362
- if not steps:
363
- yield gr.ChatMessage(
364
- role="assistant", content="⚠️ No response from agent. Please try again."
365
- )
366
- return
367
-
368
- # Process each step
369
- for step_log in steps:
370
- # pull_messages_from_step is a generator function that yields messages
371
- for message in pull_messages_from_step(step_log):
372
- if not first_message_yielded:
373
- # Replace the initial "processing" message
374
- first_message_yielded = True
375
- message.content = message.content.replace(
376
- "⏳ Processing your request...", ""
377
- )
378
-
379
- # Check message content for document analysis or search references
380
- if hasattr(message, "content") and message.content:
381
- content_lower = message.content.lower()
382
- message_types = process_message_content(content_lower)
383
-
384
- if message_types["is_document_analysis"]:
385
- message.content = f"📄 **Document Analysis:** {message.content}"
386
- elif message_types["is_search"]:
387
- message.content = f"🔍 **Search:** {message.content}"
388
-
389
- yield message
390
-
391
- # Final answer with enhanced formatting
392
- if steps: # Make sure we have at least one step before accessing
393
- final_answer = handle_agent_output_types(steps[-1]) # Use the last step
394
- if isinstance(final_answer, AgentText):
395
- yield gr.ChatMessage(
396
- role="assistant",
397
- content=f"✅ **Final Answer:**\n{final_answer.to_string()}",
398
- )
399
- else:
400
- yield gr.ChatMessage(
401
- role="assistant",
402
- content=f"✅ **Final Answer:** {str(final_answer)}",
403
- )
404
-
405
- except (ValueError, RuntimeError) as e:
406
- # More specific error handling
407
  yield gr.ChatMessage(
408
  role="assistant",
409
- content=(
410
- f"❌ **Error:** {str(e)}\n" f"Please try again with a different query."
411
- ),
412
  )
413
- except Exception as e: # Fallback for truly unexpected errors
414
- print(f"Unexpected error in stream_to_gradio: {e}")
415
- traceback.print_exc()
416
  yield gr.ChatMessage(
417
  role="assistant",
418
- content=(
419
- "❌ **Unexpected Error:** An unknown error occurred.\n"
420
- "Please try again or contact support if the issue persists."
421
- ),
 
 
 
 
 
 
422
  )
423
 
424
 
425
- # This is a helper method that can be called statically
426
- def cleanup_old_files(directory: str, days: int = FILE_RETENTION_DAYS):
427
- """
428
- Removes files older than the specified number of days.
429
- Args:
430
- directory: Directory to clean up
431
- days: Number of days to keep files
432
- """
433
- if not os.path.exists(directory):
434
- return
435
-
436
- cutoff_time = time.time() - (days * 24 * 60 * 60)
437
- for filename in os.listdir(directory):
438
- file_path = os.path.join(directory, filename)
439
- if os.path.isfile(file_path):
440
- file_mod_time = os.path.getmtime(file_path)
441
- if file_mod_time < cutoff_time:
442
- try:
443
- os.remove(file_path)
444
- print(f"Deleted old file: {file_path}")
445
- except (PermissionError, OSError) as e:
446
- print(f"Failed to delete {file_path}: {str(e)}")
447
-
448
-
449
- @dataclass
450
- class UIComponents:
451
- """Container for UI components to reduce main class attribute count."""
452
-
453
- text_input: Any = None
454
- submit_btn: Any = None
455
- stop_btn: Any = None
456
- clear_btn: Any = None
457
- status: Any = None
458
- chatbot: Any = None
459
- file_uploader: Any = None # renamed from upload_file to avoid conflict
460
- upload_status: Any = None
461
-
462
-
463
  class GradioUI:
464
- """Gradio user interface for the OpenDeepResearch application."""
465
 
466
- def __init__(self, file_upload_folder=None, max_queue_size=50):
467
- """Initialize the Gradio UI."""
468
- # Basic configuration
469
  self.file_upload_folder = file_upload_folder
470
- self.max_queue_size = max_queue_size
471
- self.max_chat_history = MAX_CHAT_HISTORY
472
- self.max_file_size_mb = MAX_FILE_SIZE_MB
473
 
474
- # Initialize UI components container
475
- self.components = UIComponents()
476
-
477
- # Job handle for cancellation
478
- self.job = None
479
-
480
- # Create upload directory if specified
481
- if self.file_upload_folder is not None: # Simplified if expression
482
- os.makedirs(file_upload_folder, exist_ok=True)
483
-
484
- # Clean up old files
485
- if file_upload_folder:
486
- cleanup_old_files(file_upload_folder)
487
 
488
  def interact_with_agent(self, prompt, messages, session_state):
489
- """
490
- Main interaction handler with the agent.
491
- Args:
492
- prompt: User input prompt
493
- messages: Current message history
494
- session_state: Session state dictionary
495
- Yields:
496
- Updated message history
497
- """
498
  # Get or create session-specific agent
499
  if "agent" not in session_state:
500
- try:
501
- session_state["agent"] = create_agent()
502
- except RuntimeError as e:
503
- messages.append(
504
- gr.ChatMessage(
505
- role="assistant", content=f"Failed to create agent: {str(e)}"
506
- )
507
- )
508
- yield messages
509
- return
510
 
 
511
  try:
512
  # Log the existence of agent memory
513
  has_memory = hasattr(session_state["agent"], "memory")
514
  print(f"Agent has memory: {has_memory}")
515
- if has_memory and hasattr(session_state["agent"].memory, "steps"):
516
- print(f"Memory steps: {len(session_state['agent'].memory.steps)}")
517
 
518
- # Truncate messages if they exceed the maximum
519
- if len(messages) > self.max_chat_history:
520
- # Keep only the latest messages
521
- messages = messages[-self.max_chat_history :]
522
-
523
- # Add user message
524
  messages.append(gr.ChatMessage(role="user", content=prompt))
525
  yield messages
526
 
527
- # Process with agent and stream responses
528
  for msg in stream_to_gradio(
529
  session_state["agent"], task=prompt, reset_agent_memory=False
530
  ):
531
  messages.append(msg)
532
- yield messages
 
533
 
534
- except ValueError as e:
535
- print(f"Value error in interaction: {str(e)}")
536
- messages.append(
537
- gr.ChatMessage(role="assistant", content=f"Input error: {str(e)}")
538
- )
539
- yield messages
540
  except Exception as e:
541
  print(f"Error in interaction: {str(e)}")
542
- traceback.print_exc()
543
- messages.append(
544
- gr.ChatMessage(role="assistant", content=f"Error occurred: {str(e)}")
545
- )
546
- yield messages
547
 
548
- def handle_file_upload(self, files, file_uploads_log):
549
- """
550
- Handle file uploads with proper validation and security.
551
- Args:
552
- files: Files to upload
553
- file_uploads_log: List of uploaded files
554
- Returns:
555
- Tuple of (status textbox, updated file_uploads_log, updated upload button visibility)
556
- """
557
- if not files:
558
- return (
559
- gr.Textbox(value="No file uploaded", visible=True),
560
- file_uploads_log,
561
- )
562
 
563
  try:
564
- # Process the file (files[0] since we're using file_count="single")
565
- file = files[0]
566
-
567
- # Validate file exists
568
- if not os.path.exists(file.name):
569
- return (
570
- gr.Textbox(value="File not found", visible=True),
571
- file_uploads_log,
572
- )
573
 
574
- # Check file size
575
- file_size_mb = os.path.getsize(file.name) / (1024 * 1024)
576
- if file_size_mb > self.max_file_size_mb:
577
- return (
578
- gr.Textbox(
579
- value=f"File size exceeds {self.max_file_size_mb} MB limit.",
580
- visible=True,
581
- ),
582
- file_uploads_log,
583
- )
584
 
585
- # Validate mime type
586
- mime_type, _ = mimetypes.guess_type(file.name)
587
- if mime_type not in ALLOWED_FILE_TYPES:
588
- return (
589
- gr.Textbox(value="File type disallowed", visible=True),
590
- file_uploads_log,
591
- )
592
 
593
- # Sanitize file name
594
- original_name = os.path.basename(file.name)
595
- # Replace invalid chars with underscores
596
- sanitized_name = re.sub(r"[^\w\-.]", "_", original_name)
597
- # Add timestamp to ensure uniqueness
598
- timestamp = datetime.now().strftime(
599
- "%y%m%d_%H%M%S"
600
- ) # Correct format string
601
- name_parts = os.path.splitext(sanitized_name)
602
- sanitized_name = f"{name_parts[0]}_{timestamp}{name_parts[1]}"
603
-
604
- # Save the uploaded file to the specified folder
605
- file_path = os.path.join(self.file_upload_folder, sanitized_name)
606
- shutil.copy(file.name, file_path)
607
 
608
- return (
609
- gr.Textbox(value=f"File uploaded: {original_name}", visible=True),
610
- file_uploads_log + [file_path],
611
- )
612
 
613
- except FileNotFoundError as e:
614
- return (
615
- gr.Textbox(value=f"File not found: {str(e)}", visible=True),
616
- file_uploads_log,
617
- )
618
- except PermissionError as e:
619
- return (
620
- gr.Textbox(value=f"Permission denied: {str(e)}", visible=True),
621
- file_uploads_log,
622
- )
623
- except (IOError, OSError) as e:
624
- return (
625
- gr.Textbox(value=f"I/O error during upload: {str(e)}", visible=True),
626
- file_uploads_log,
627
- )
628
- except Exception as e:
629
- # For truly unexpected errors, log with more detail
630
- print(f"Unexpected upload error: {e}")
631
- traceback.print_exc()
632
  return (
633
- gr.Textbox(value=f"Error processing upload: {str(e)}", visible=True),
 
 
634
  file_uploads_log,
635
  )
636
 
637
- def log_user_message(self, text_input, file_uploads_log):
638
- """
639
- Process user message and handle file references.
640
- Args:
641
- text_input: User's text input
642
- file_uploads_log: List of uploaded files
643
- Returns:
644
- Tuple of (processed message, updated text input, submit button)
645
- """
646
- if not text_input.strip():
647
- return (
648
- "",
649
- gr.Textbox(value="", interactive=True),
650
- gr.Button(interactive=True),
651
- )
652
 
653
- # Only clean if necessary (avoid unnecessary processing)
 
654
  message = text_input
655
- if any(char in text_input for char in "€¥£-"):
656
- message = clean(
657
- text_input,
658
- fix_unicode=True,
659
- to_ascii=True,
660
- lower=False, # Keep original case
661
- no_line_breaks=False,
662
- no_urls=False,
663
- no_emails=False,
664
- no_phone_numbers=False,
665
- no_numbers=False,
666
- no_digits=False,
667
- no_currency_symbols=False,
668
- no_punct=False,
669
- lang="en",
670
- )
671
 
672
- # Add file references if any
673
- if file_uploads_log:
674
- files_info = "\n".join(
675
- [f"- {os.path.basename(f)}" for f in file_uploads_log]
676
- )
677
- message += f"\nYou have been provided with these files:\n{files_info}"
678
 
679
  return (
680
  message,
681
  gr.Textbox(
682
  value="",
683
  interactive=False,
684
- placeholder="Processing your request...",
685
  ),
686
  gr.Button(interactive=False),
687
  )
688
 
689
- def clear_chat(self):
690
- """
691
- Clear the chat history and reset UI elements.
692
- Returns:
693
- Tuple of (empty chat history, interactive text input, interactive button, empty status)
694
- """
695
- return (
696
- [], # Empty chat history
697
- [], # Empty stored messages
698
- gr.Textbox(value="", interactive=True),
699
- gr.Button(interactive=True),
700
- gr.Textbox(value="", visible=False), # Clear status
701
- )
 
 
 
 
 
 
 
 
 
 
 
702
 
703
- def launch(self, share=False, **kwargs):
704
- """
705
- Launch the Gradio UI with responsive layout.
706
- Args:
707
- share: Whether to create a public link
708
- **kwargs: Additional keyword arguments for launch
709
- """
710
  with gr.Blocks(theme="ocean", fill_height=True) as demo:
711
- # Use Gradio's built-in responsive layout
712
- with gr.Row():
713
- # Sidebar (smaller on mobile)
714
- with gr.Column(scale=1, min_width=100):
715
- gr.Markdown(
716
- """# OpenDeepResearch
717
- AI-powered research assistant using SmoLAgents
718
- Model: deepseek/deepseek-chat-v3-0324:free"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
719
  )
 
720
 
721
- with gr.Group():
722
- gr.Markdown("**Research Query**", container=True)
723
- self.components.text_input = gr.Textbox(
724
- lines=3,
725
- label="Your request",
726
- placeholder="Enter your research question or task",
727
- container=False,
728
- )
729
-
730
- with gr.Row():
731
- self.components.submit_btn = gr.Button(
732
- "Run", variant="primary"
733
- )
734
- self.components.stop_btn = gr.Button("Stop", variant="stop")
735
- self.components.clear_btn = gr.Button(
736
- "Clear", variant="secondary"
737
- )
738
-
739
- # File upload in collapsible section
740
- if self.file_upload_folder is not None:
741
- with gr.Accordion("Upload Files", open=False):
742
- self.components.file_uploader = gr.UploadButton(
743
- "Upload a file",
744
- file_count="single",
745
- file_types=["pdf", "docx", "txt", "md", "json"],
746
- )
747
- self.components.upload_status = gr.Textbox(
748
- label="Upload status", interactive=False, visible=False
749
- )
750
-
751
- # Tool information
752
- with gr.Accordion("Available Tools", open=False):
753
- gr.Markdown(
754
- """
755
- - **Web Search**: Find information online
756
- - **Document Analysis**: Analyze uploaded documents
757
- - **Text Cleaning**: Format and clean text
758
- - **Image Generation**: Create images from descriptions
759
- """
760
- )
761
-
762
- gr.HTML("<br><h5>Powered by:</h5>")
763
- with gr.Row():
764
- gr.HTML(
765
- """
766
- <div style="display: flex; align-items: center; gap: 8px;
767
- font-family: system-ui, -apple-system, sans-serif;">
768
- <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png"
769
- style="width: 32px; height: 32px; object-fit: contain;"
770
- alt="logo">
771
- <a target="_blank" href="https://github.com/huggingface/smolagents">
772
- <b>huggingface/smolagents</b>
773
- </a>
774
- </div>
775
- """
776
- )
777
-
778
- # Main chat area (larger)
779
- with gr.Column(scale=3, min_width=500):
780
- # Add session state to store session-specific data
781
- session_state = gr.State({})
782
- stored_messages = gr.State([])
783
  file_uploads_log = gr.State([])
784
-
785
- # Chat interface
786
- self.components.chatbot = gr.Chatbot(
787
- label="Research Assistant",
788
- type="messages",
789
- avatar_images=(
790
- None,
791
- "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
792
- ),
793
- height=600,
794
- elem_id="research-chatbot",
795
  )
796
 
797
- # Status indicator
798
- self.components.status = gr.Textbox(
799
- "", label="Status", interactive=False, visible=False
 
 
 
 
 
 
 
 
 
800
  )
801
 
802
- # Connect event handlers with appropriate cancellation
803
- # File upload handler - Updated for UploadButton
804
- if hasattr(self.components, "file_uploader") and hasattr(
805
- self.components, "upload_status"
806
- ):
807
- self.components.file_uploader.upload(
808
- self.handle_file_upload,
809
- [self.components.file_uploader, file_uploads_log],
810
- [self.components.upload_status, file_uploads_log],
811
- )
812
-
813
- # Text input handler with cancellation
814
- submit_event = (
815
- self.components.text_input.submit(
816
- self.log_user_message,
817
- [self.components.text_input, file_uploads_log],
818
- [
819
- stored_messages,
820
- self.components.text_input,
821
- self.components.submit_btn,
822
- ],
823
- )
824
- .then(
825
- self.interact_with_agent,
826
- [stored_messages, self.components.chatbot, session_state],
827
- [self.components.chatbot],
828
- )
829
- .then(
830
- lambda: (
831
- gr.Textbox(interactive=True),
832
- gr.Button(interactive=True),
833
- ),
834
- None,
835
- [self.components.text_input, self.components.submit_btn],
836
- )
837
- )
838
 
839
- # Button click handler with same flow
840
- click_event = (
841
- self.components.submit_btn.click(
842
- self.log_user_message,
843
- [self.components.text_input, file_uploads_log],
844
- [
845
- stored_messages,
846
- self.components.text_input,
847
- self.components.submit_btn,
848
- ],
849
- )
850
- .then(
851
- self.interact_with_agent,
852
- [stored_messages, self.components.chatbot, session_state],
853
- [self.components.chatbot],
854
- )
855
- .then(
856
- lambda: (
857
- gr.Textbox(interactive=True),
858
- gr.Button(interactive=True),
859
- ),
860
- None,
861
- [self.components.text_input, self.components.submit_btn],
862
- )
863
- )
864
 
865
- # Stop button cancels ongoing operations
866
- self.components.stop_btn.click(
867
- None, None, None, cancels=[submit_event, click_event]
868
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
869
 
870
- # Clear button
871
- self.components.clear_btn.click(
872
- self.clear_chat,
873
- None,
874
- [
875
- self.components.chatbot,
876
- stored_messages,
877
- self.components.text_input,
878
- self.components.submit_btn,
879
- self.components.status,
880
- ],
881
- )
882
 
883
- # Launch with fixed queue settings (avoiding the problematic parameter)
884
- demo.launch(
885
- share=share,
886
- debug=True,
887
  )
 
888
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
889
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
890
  def main():
891
- """
892
- Main entry point for the application.
893
- Returns:
894
- int: Exit code (0 for success, 1 for failure)
895
- """
896
- try:
897
- # Initialize environment
898
- if not setup_environment():
899
- print("Failed to set up environment properly.")
900
- return 1
901
-
902
- # Ensure downloads folder exists
903
- downloads_folder = BROWSER_CONFIG["downloads_folder"]
904
- os.makedirs(f"./{downloads_folder}", exist_ok=True)
905
-
906
- # Create uploads folder
907
- uploads_folder = "uploaded_files"
908
- os.makedirs(uploads_folder, exist_ok=True)
909
-
910
- # Launch UI
911
- print("Starting OpenDeepResearch Gradio interface...")
912
- gradio_ui = GradioUI(file_upload_folder=uploads_folder)
913
- gradio_ui.launch()
914
-
915
- return 0
916
-
917
- except KeyError as e:
918
- print(f"Configuration error: Missing key {e}")
919
- traceback.print_exc()
920
- return 1
921
- except Exception as e:
922
- print(f"Application failed to start: {e}")
923
- traceback.print_exc()
924
- return 1
925
 
926
 
927
  if __name__ == "__main__":
928
- EXIT_CODE = main() # UPPER_CASE for constants
929
- sys.exit(EXIT_CODE) # Use sys.exit instead of exit
 
 
 
 
1
  import mimetypes
 
 
2
  import os
3
  import re
4
  import shutil
5
+ from typing import Optional
 
 
6
 
 
7
  from dotenv import load_dotenv
8
  from huggingface_hub import login
9
  import gradio as gr
 
19
  VisitTool,
20
  )
21
  from scripts.visual_qa import visualizer
 
22
 
23
  from smolagents import (
24
  CodeAgent,
 
29
  GoogleSearchTool,
30
  Tool,
31
  )
32
+ from smolagents.agent_types import AgentText, AgentImage, AgentAudio
33
  from smolagents.gradio_ui import pull_messages_from_step, handle_agent_output_types
34
 
35
+ # ------------------------ Configuration and Setup ------------------------
36
+ # Constants and configurations
37
  AUTHORIZED_IMPORTS = [
38
  "requests", # Web requests (fetching data from the internet)
39
  "zipfile", # Working with ZIP archives
 
81
  "schedule", # Allow the agent to schedule tasks
82
  ]
83
 
 
84
  USER_AGENT = (
85
  "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
86
  "(KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0"
 
114
  "audio/ogg",
115
  ]
116
 
 
 
 
 
 
 
 
117
 
118
def setup_environment():
    """Initialize environment variables and Hugging Face authentication.

    Loads variables from a .env file (overriding any already set) and, if an
    HF_TOKEN is present, logs in to the Hugging Face Hub with it.

    Returns:
        bool: True if a token was found and login succeeded, False otherwise.
    """
    load_dotenv(override=True)
    # Read the token once instead of calling os.getenv() repeatedly.
    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:  # Check if token is actually set
        print("HF_TOKEN not found in environment variables.")
        return False
    try:
        login(hf_token)
    except Exception as e:  # auth/network failure shouldn't crash startup
        print(f"Failed to login with HF token: {e}")
        return False
    # Only reveal the tail of the token to avoid leaking the full secret.
    print("HF_TOKEN (last 10 characters):", hf_token[-10:])
    return True
 
126
 
127
 
128
+ # ------------------------ Model and Tool Management ------------------------
129
class ModelManager:
    """Manages model loading and initialization."""

    @staticmethod
    def load_model(chosen_inference: str, model_id: str, key_manager=None):
        """Load the specified model with appropriate configuration.

        Args:
            chosen_inference: One of "hf_api", "hf_api_provider", "litellm",
                "openai" or "transformers".
            model_id: Identifier of the model to load (unused by the
                "hf_api_provider" and "transformers" back-ends, which use
                fixed configurations).
            key_manager: Object exposing get_key(); required for "openai".

        Returns:
            A configured model instance for the requested back-end.

        Raises:
            ValueError: If the inference type is unknown, or key_manager is
                missing for the "openai" back-end.
        """
        # No blanket try/except here: the previous version caught Exception
        # only to print and re-raise, which double-reported errors (including
        # its own ValueError) without adding any handling.
        if chosen_inference == "hf_api":
            return HfApiModel(model_id=model_id)
        if chosen_inference == "hf_api_provider":
            return HfApiModel(provider="together")
        if chosen_inference == "litellm":
            return LiteLLMModel(model_id=model_id)
        if chosen_inference == "openai":
            if not key_manager:
                raise ValueError("Key manager required for OpenAI model")
            return OpenAIServerModel(
                model_id=model_id, api_key=key_manager.get_key("openai_api_key")
            )
        if chosen_inference == "transformers":
            return TransformersModel(
                model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct",
                device_map="auto",
                max_new_tokens=1000,
            )
        raise ValueError(f"Invalid inference type: {chosen_inference}")
166
 
167
 
 
 
168
  class ToolRegistry:
169
  """Manages tool initialization and organization."""
170
 
171
  @staticmethod
172
  def load_web_tools(model, browser, text_limit=20000):
173
+ """Initialize and return web-related tools."""
 
 
 
 
 
 
 
 
174
  return [
175
  GoogleSearchTool(provider="serper"),
176
  VisitTool(browser),
 
184
 
185
  @staticmethod
186
  def load_image_generation_tools():
187
+ """Initialize and return image generation tools."""
 
 
 
 
 
 
188
  try:
189
  return Tool.from_space(
190
+ space_id="xkerser/FLUX.1-dev",
191
  name="image_generator",
192
+ description="Generates high-quality AgentImage using the FLUX.1-dev model based on text prompts.",
 
 
 
193
  )
194
+ except Exception as e:
195
+ print(f"✗ Couldn't initialize image generation tool: {e}")
196
+ raise
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
197
 
198
 
199
+ # ------------------------ Agent Creation and Execution ------------------------
200
def create_agent():
    """Build and return a freshly configured CodeAgent.

    Wires up the LLM, the text browser, the web tools and the image
    generator, validates that every entry is a Tool instance, and returns
    the assembled agent.
    """
    # LLM shared by the agent and the web tools' text inspector.
    llm = LiteLLMModel(
        custom_role_conversions=CUSTOM_ROLE_CONVERSIONS,
        model_id="openrouter/google/gemini-2.0-flash-001",  # currently serving:
    )  # DEEPSEEK = openrouter/perplexity/r1-1776 <--- boss model

    # Browser instance backing the web tools.
    browser = SimpleTextBrowser(**BROWSER_CONFIG)

    # Assemble the full toolbox as one flat list.
    toolbox = [visualizer]
    toolbox.extend(ToolRegistry.load_web_tools(llm, browser, 30000))
    toolbox.append(ToolRegistry.load_image_generation_tools())

    # Fail fast if anything in the toolbox is not a proper Tool.
    for candidate in toolbox:
        if not isinstance(candidate, Tool):
            raise ValueError(
                f"Invalid tool type: {type(candidate)}. All tools must be instances of Tool class."
            )

    return CodeAgent(
        model=llm,
        tools=toolbox,  # Pass a single list containing all tools
        max_steps=10,
        verbosity_level=1,
        additional_authorized_imports=AUTHORIZED_IMPORTS,
        planning_interval=4,
    )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
234
 
235
 
236
def stream_to_gradio(
    agent,
    task: str,
    reset_agent_memory: bool = False,
    additional_args: Optional[dict] = None,
):
    """Runs an agent with the given task and streams messages as Gradio ChatMessages.

    Args:
        agent: Agent exposing run(..., stream=True) that yields step logs.
        task: Task/prompt forwarded to the agent.
        reset_agent_memory: Whether to reset the agent's memory before running.
        additional_args: Optional extra arguments forwarded to agent.run().

    Yields:
        gr.ChatMessage objects for each intermediate step and the final answer.
    """
    # BUG FIX: track the last step explicitly. The previous code read the
    # loop variable after the loop, which raises NameError (and gives the
    # user no feedback) when the agent yields no steps at all.
    last_step = None
    for last_step in agent.run(
        task, stream=True, reset=reset_agent_memory, additional_args=additional_args
    ):
        for message in pull_messages_from_step(last_step):
            yield message

    if last_step is None:
        # No steps produced: report it instead of crashing.
        yield gr.ChatMessage(
            role="assistant", content="⚠️ No response from agent. Please try again."
        )
        return

    # Process final answer: use a more comprehensive media output.
    final_answer = handle_agent_output_types(last_step)  # last log is the run's final_answer

    if isinstance(final_answer, AgentText):
        yield gr.ChatMessage(
            role="assistant",
            content=f"**Final answer:**\n{final_answer.to_string()}\n",
        )
    elif isinstance(final_answer, AgentImage):
        yield gr.ChatMessage(
            role="assistant",
            content={"image": final_answer.to_string(), "type": "file"},
        )  # Send as Gradio-compatible file object
    elif isinstance(final_answer, AgentAudio):
        yield gr.ChatMessage(
            role="assistant",
            content={"audio": final_answer.to_string(), "type": "file"},
        )  # Send as Gradio-compatible file object
    else:
        yield gr.ChatMessage(
            role="assistant", content=f"**Final answer:** {str(final_answer)}"
        )
272
 
273
 
274
+ # ------------------------ Gradio UI Components ------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
275
class GradioUI:
    """A one-line interface to launch your agent in Gradio."""

    def __init__(self, file_upload_folder: str | None = None):
        """Initialize the Gradio UI with optional file upload functionality.

        Args:
            file_upload_folder: directory where uploads are stored; when None,
                the upload widgets are not rendered at all.
        """
        self.file_upload_folder = file_upload_folder

        if self.file_upload_folder is not None:
            if not os.path.exists(file_upload_folder):
                # NOTE(review): os.mkdir raises if parent directories are
                # missing; os.makedirs(..., exist_ok=True) would be safer.
                os.mkdir(file_upload_folder)

    def interact_with_agent(self, prompt, messages, session_state):
        """Main interaction handler with the agent.

        Generator: appends the user prompt, then streams every agent message
        into ``messages``, yielding the growing list so the chatbot updates live.
        """

        # Get or create session-specific agent (one agent per browser session).
        if "agent" not in session_state:
            session_state["agent"] = create_agent()

        # Adding monitoring
        try:
            # Log the existence of agent memory
            has_memory = hasattr(session_state["agent"], "memory")
            print(f"Agent has memory: {has_memory}")
            if has_memory:
                print(f"Memory type: {type(session_state['agent'].memory)}")

            messages.append(gr.ChatMessage(role="user", content=prompt))
            yield messages

            # reset_agent_memory=False keeps conversational context across turns.
            for msg in stream_to_gradio(
                session_state["agent"], task=prompt, reset_agent_memory=False
            ):
                messages.append(msg)
                yield messages  # Yield messages after each step
            yield messages  # Yield messages one last time
        except Exception as e:
            print(f"Error in interaction: {str(e)}")
            raise

    def upload_file(
        self,
        file,
        file_uploads_log,
    ):
        """Handle file uploads with proper validation and security.

        Returns:
            tuple: (status ``gr.Textbox``, updated upload log); the log is only
            extended when the file passes every check.
        """
        if file is None:
            return gr.Textbox("No file uploaded", visible=True), file_uploads_log

        try:
            mime_type, _ = mimetypes.guess_type(file.name)
        except Exception as e:
            return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log

        # Whitelist check against the module-level ALLOWED_FILE_TYPES.
        if mime_type not in ALLOWED_FILE_TYPES:
            return gr.Textbox("File type disallowed", visible=True), file_uploads_log

        # Sanitize file name
        original_name = os.path.basename(file.name)
        sanitized_name = re.sub(
            r"[^\w\-.]", "_", original_name
        )  # Replace invalid chars with underscores

        # Ensure the extension correlates to the mime type
        # (the first extension found in the types map wins per mime type).
        type_to_ext = {}
        for ext, t in mimetypes.types_map.items():
            if t not in type_to_ext:
                type_to_ext[t] = ext

        # Build sanitized filename with proper extension
        # NOTE(review): "".join drops the dots between name parts, so
        # "my.file.txt" becomes "myfile" + ext — confirm this is intended.
        name_parts = sanitized_name.split(".")[:-1]
        extension = type_to_ext.get(mime_type, "")
        sanitized_name = "".join(name_parts) + extension

        # Limit File Size, and Throw Error
        # NOTE(review): duplicates the module constant MAX_FILE_SIZE_MB (= 50);
        # consider using the constant so the two limits cannot drift apart.
        max_file_size_mb = 50  # Define the limit
        file_size_mb = os.path.getsize(file.name) / (1024 * 1024)  # Size in MB

        if file_size_mb > max_file_size_mb:
            return (
                gr.Textbox(
                    f"File size exceeds {max_file_size_mb} MB limit.", visible=True
                ),
                file_uploads_log,
            )

        # Save the uploaded file to the specified folder
        file_path = os.path.join(self.file_upload_folder, sanitized_name)
        shutil.copy(file.name, file_path)

        return gr.Textbox(
            f"File uploaded: {file_path}", visible=True
        ), file_uploads_log + [file_path]

    def log_user_message(self, text_input, file_uploads_log):
        """Process user message and handle file references.

        Appends the list of uploaded file paths to the prompt (so the agent
        knows about them) and disables the input widgets while processing.
        """
        message = text_input

        if len(file_uploads_log) > 0:
            message += f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"  # Added file list

        return (
            message,
            gr.Textbox(
                value="",
                interactive=False,
                placeholder="Processing...",  # Changed placeholder.
            ),
            gr.Button(interactive=False),
        )

    def detect_device(self, request: gr.Request):
        """Detect whether the user is on mobile or desktop device.

        Checks, in order: the sec-ch-ua-mobile client hint, the user-agent
        string, then the sec-ch-ua-platform hint; defaults to "Desktop".
        """
        if not request:
            return "Unknown device"  # Handle case where request is none.

        # Method 1: Check sec-ch-ua-mobile header
        is_mobile_header = request.headers.get("sec-ch-ua-mobile")
        if is_mobile_header:
            return "Mobile" if "?1" in is_mobile_header else "Desktop"

        # Method 2: Check user-agent string
        user_agent = request.headers.get("user-agent", "").lower()
        mobile_keywords = ["android", "iphone", "ipad", "mobile", "phone"]

        if any(keyword in user_agent for keyword in mobile_keywords):
            return "Mobile"

        # Method 3: Check platform
        platform = request.headers.get("sec-ch-ua-platform", "").lower()
        if platform:
            if platform in ['"android"', '"ios"']:
                return "Mobile"
            if platform in ['"windows"', '"macos"', '"linux"']:
                return "Desktop"

        # Default case if no clear indicators
        return "Desktop"

    def launch(self, **kwargs):
        """Launch the Gradio UI with responsive layout."""
        with gr.Blocks(theme="ocean", fill_height=True) as demo:
            # Different layouts for mobile and computer devices
            @gr.render()
            def layout(request: gr.Request):
                device = self.detect_device(request)
                print(f"device - {device}")
                # Render layout with sidebar
                if device == "Desktop":
                    return self._create_desktop_layout()
                return self._create_mobile_layout()

        demo.queue(max_size=20).launch(
            debug=True, **kwargs
        )  # Add queue with reasonable size

    def _create_desktop_layout(self):
        """Create the desktop layout with sidebar."""
        with gr.Blocks(fill_height=True) as sidebar_demo:
            with gr.Sidebar():
                gr.Markdown(
                    """#OpenDeepResearch - 3theSmolagents!
                    Model_id: google/gemini-2.0-flash-001"""
                )
                with gr.Group():
                    gr.Markdown("**What's on your mind mate?**", container=True)
                    text_input = gr.Textbox(
                        lines=3,
                        label="Your request",
                        container=False,
                        placeholder="Enter your prompt here and press Shift+Enter or press the button",
                    )
                    launch_research_btn = gr.Button("Run", variant="primary")

                # If an upload folder is provided, enable the upload feature
                if self.file_upload_folder is not None:
                    upload_file = gr.File(label="Upload a file")
                    upload_status = gr.Textbox(
                        label="Upload Status", interactive=False, visible=False
                    )
                    file_uploads_log = gr.State([])
                    upload_file.change(
                        self.upload_file,
                        [upload_file, file_uploads_log],
                        [upload_status, file_uploads_log],
                    )

                gr.HTML("<br><br><h4><center>Powered by:</center></h4>")
                with gr.Row():
                    gr.HTML(
                        """
                        <div style="display: flex; align-items: center; gap: 8px; font-family: system-ui, -apple-system, sans-serif;">
                        <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png"
                        style="width: 32px; height: 32px; object-fit: contain;" alt="logo">
                        <a target="_blank" href="https://github.com/huggingface/smolagents">
                        <b>huggingface/smolagents</b>
                        </a>
                        </div>
                        """
                    )

            # Add session state to store session-specific data
            session_state = gr.State({})  # Initialize empty state for each session
            stored_messages = gr.State([])
            # NOTE(review): locals() probing is fragile — file_uploads_log is
            # only created above when an upload folder exists, so this is a
            # fallback for the no-upload configuration.
            if "file_uploads_log" not in locals():
                file_uploads_log = gr.State([])

            chatbot = gr.Chatbot(
                label="open-Deep-Research",
                type="messages",
                avatar_images=(
                    None,
                    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
                ),
                resizeable=False,
                scale=1,
                elem_id="my-chatbot",
            )

            self._connect_event_handlers(
                text_input,
                launch_research_btn,
                file_uploads_log,
                stored_messages,
                chatbot,
                session_state,
            )

        return sidebar_demo

    def _create_mobile_layout(self):
        """Create the mobile layout (simpler without sidebar)."""
        with gr.Blocks(fill_height=True) as simple_demo:
            gr.Markdown("""#OpenDeepResearch - free the AI agents!""")
            # Add session state to store session-specific data
            session_state = gr.State({})
            stored_messages = gr.State([])
            file_uploads_log = gr.State([])

            chatbot = gr.Chatbot(
                label="open-Deep-Research",
                type="messages",
                avatar_images=(
                    None,
                    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
                ),
                resizeable=True,
                scale=1,
            )

            # If an upload folder is provided, enable the upload feature
            if self.file_upload_folder is not None:
                upload_file = gr.File(label="Upload a file")
                upload_status = gr.Textbox(
                    label="Upload Status", interactive=False, visible=False
                )
                upload_file.change(
                    self.upload_file,
                    [upload_file, file_uploads_log],
                    [upload_status, file_uploads_log],
                )

            text_input = gr.Textbox(
                lines=1,
                label="What's on your mind mate?",
                placeholder="Chuck in a question and we'll take care of the rest",
            )
            launch_research_btn = gr.Button("Run", variant="primary")

            self._connect_event_handlers(
                text_input,
                launch_research_btn,
                file_uploads_log,
                stored_messages,
                chatbot,
                session_state,
            )

        return simple_demo

    def _connect_event_handlers(
        self,
        text_input,
        launch_research_btn,
        file_uploads_log,
        stored_messages,
        chatbot,
        session_state,
    ):
        """Connect the event handlers for input elements.

        Both submit (Enter) and button click run the same chain:
        log_user_message -> interact_with_agent -> re-enable the inputs.
        """
        # Connect text input submit event
        text_input.submit(
            self.log_user_message,
            [text_input, file_uploads_log],
            [stored_messages, text_input, launch_research_btn],
        ).then(
            self.interact_with_agent,
            [stored_messages, chatbot, session_state],
            [chatbot],
        ).then(
            lambda: (
                gr.Textbox(
                    interactive=True,
                    placeholder="Enter your prompt here and press the button",
                ),
                gr.Button(interactive=True),
            ),
            None,
            [text_input, launch_research_btn],
        )

        # Connect button click event
        launch_research_btn.click(
            self.log_user_message,
            [text_input, file_uploads_log],
            [stored_messages, text_input, launch_research_btn],
        ).then(
            self.interact_with_agent,
            [stored_messages, chatbot, session_state],
            [chatbot],
        ).then(
            lambda: (
                gr.Textbox(
                    interactive=True,
                    placeholder="Enter your prompt here and press the button",
                ),
                gr.Button(interactive=True),
            ),
            None,
            [text_input, launch_research_btn],
        )
606
+
607
+
608
+ # ------------------------ Execution ------------------------
609
def main():
    """Application entry point: configure the environment, prepare the
    downloads folder, then start the Gradio UI."""
    setup_environment()

    # The browser writes fetched files here; make sure it exists up front.
    downloads_dir = f"./{BROWSER_CONFIG['downloads_folder']}"
    os.makedirs(downloads_dir, exist_ok=True)

    ui = GradioUI(file_upload_folder="uploaded_files")
    ui.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
619
 
620
 
621
# Run the app only when executed as a script (not when imported as a module).
if __name__ == "__main__":
    main()