Leonardo committed on
Commit
cb7be2d
·
verified ·
1 Parent(s): 57d4e61

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +659 -442
app.py CHANGED
@@ -1,10 +1,15 @@
1
  """Main application for the OpenDeepResearch Gradio interface."""
2
 
 
3
  import mimetypes
 
 
4
  import os
5
  import re
6
  import shutil
7
- from typing import Optional
 
 
8
 
9
  from cleantext import clean
10
  from dotenv import load_dotenv
@@ -37,7 +42,7 @@ from smolagents.agent_types import AgentText # AgentImage, AgentAudio
37
  from smolagents.gradio_ui import pull_messages_from_step, handle_agent_output_types
38
 
39
 
40
- # Constants and configurations
41
  AUTHORIZED_IMPORTS = [
42
  "requests", # Web requests (fetching data from the internet)
43
  "zipfile", # Working with ZIP archives
@@ -81,11 +86,8 @@ AUTHORIZED_IMPORTS = [
81
  "selenium", # Automated browser control (for dynamic websites)
82
  # Database interaction (if needed) - Handle credentials securely!
83
  "sqlite3", # SQLite database access
84
- # "psycopg2", # PostgreSQL adapter if needed
85
  # Task scheduling
86
  "schedule", # Allow the agent to schedule tasks
87
- # Networking
88
- # "socket", # Networking
89
  ]
90
 
91
 
@@ -110,28 +112,45 @@ ALLOWED_FILE_TYPES = [
110
  "application/pdf",
111
  "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
112
  "text/plain",
113
- "text/markdown", # Added Markdown support
114
- "application/json", # Added JSON support
115
  "image/png",
116
  "image/webp",
117
- "image/jpeg", # Added JPEG support
118
- "image/gif", # Added GIF support
119
  "video/mp4",
120
- "audio/mpeg", # Added MP3 support
121
- "audio/wav", # Added WAV support
122
- "audio/ogg", # Added OGG support
123
  ]
124
 
 
 
 
 
 
 
 
125
 
126
  def setup_environment():
127
- """Initialize environment variables and authentication."""
 
 
 
 
128
  load_dotenv(override=True)
129
  hf_token = os.getenv("HF_TOKEN")
130
- if hf_token: # Check if token is actually set
131
- login(hf_token)
132
- print("HF_TOKEN (last 10 characters):", hf_token[-10:])
 
 
 
 
 
133
  else:
134
  print("HF_TOKEN not found in environment variables.")
 
135
 
136
 
137
  class ModelManager:
@@ -139,43 +158,55 @@ class ModelManager:
139
 
140
  @staticmethod
141
  def load_model(chosen_inference: str, model_id: str, key_manager=None):
142
- """Load the specified model with appropriate configuration."""
143
- try:
144
- if chosen_inference == "hf_api":
145
- return HfApiModel(model_id=model_id)
146
-
147
- if chosen_inference == "hf_api_provider":
148
- return HfApiModel(provider="together")
149
-
150
- if chosen_inference == "litellm":
151
- return LiteLLMModel(model_id=model_id)
152
-
153
- if chosen_inference == "openai":
154
- if not key_manager:
155
- raise ValueError("Key manager required for OpenAI model")
156
- return OpenAIServerModel(
157
- model_id=model_id, api_key=key_manager.get_key("openai_api_key")
158
- )
159
-
160
- if chosen_inference == "transformers":
161
- return TransformersModel(
162
- model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct",
163
- device_map="auto",
164
- max_new_tokens=1000,
165
- )
166
-
167
- raise ValueError(f"Invalid inference type: {chosen_inference}")
168
- except (ValueError, RuntimeError) as e: # More specific exceptions
169
- print(f"Model loading failed: {e}")
170
- raise
 
 
171
 
172
 
 
 
173
  class ToolRegistry:
174
  """Manages tool initialization and organization."""
175
 
176
  @staticmethod
177
  def load_web_tools(model, browser, text_limit=20000):
178
- """Initialize and return web-related tools."""
 
 
 
 
 
 
 
 
179
  return [
180
  GoogleSearchTool(provider="serper"),
181
  VisitTool(browser),
@@ -189,76 +220,126 @@ class ToolRegistry:
189
 
190
  @staticmethod
191
  def load_image_generation_tools():
192
- """Initialize and return image generation tools."""
 
 
 
 
 
 
193
  try:
194
  return Tool.from_space(
195
- space_id="xkerser/FLUX.1-dev",
196
  name="image_generator",
197
  description=(
198
  "Generates high-quality AgentImage. "
199
- "with text prompt (77 token limit)."
200
  ),
201
  )
202
- except Exception as e:
203
- print(f"✗ Couldn't initialize image generation tool: {e}")
204
- raise
 
 
 
 
205
 
206
  @staticmethod
207
  def load_clean_text_tool():
208
- """Initialize and return text cleaning tool."""
 
 
 
 
 
 
209
  try:
210
  return TextCleanerTool()
211
- except Exception as e:
212
- print(f" Couldn't initialize clean text tool: {e}")
213
- raise
214
 
215
 
216
  def create_agent():
217
- """Creates a fresh agent instance with properly configured tools."""
218
- # Initialize model
219
- model = LiteLLMModel(
220
- custom_role_conversions=CUSTOM_ROLE_CONVERSIONS,
221
- # Currently serving:
222
- model_id="openrouter/anthropic/claude-3.7-sonnet",
223
- ) # DEEPSEEK = openrouter/perplexity/r1-1776 <--- boss model
224
-
225
- # Initialize tools
226
- text_limit = 20000
227
- browser = SimpleTextBrowser(**BROWSER_CONFIG)
228
-
229
- # Collect all tools in a single list
230
- web_tools = ToolRegistry.load_web_tools(model, browser, text_limit)
231
- image_generator = ToolRegistry.load_image_generation_tools()
232
- clean_text = TextCleanerTool() # Instantiate TextCleanerTool
233
-
234
- # Combine all tools into a single list
235
- all_tools = [visualizer] + web_tools + [image_generator, clean_text]
236
-
237
- # Validate tools before creating agent
238
- for tool in all_tools:
239
- if not isinstance(tool, Tool):
240
- raise ValueError(
241
- f"Invalid tool type: {type(tool)}. "
242
- f"All tools must be instances of Tool class."
243
- )
244
 
245
- return CodeAgent(
246
- model=model,
247
- tools=all_tools, # Pass a single list containing all tools
248
- max_steps=10,
249
- verbosity_level=1,
250
- additional_authorized_imports=AUTHORIZED_IMPORTS,
251
- planning_interval=4,
252
- )
253
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
254
 
255
  def stream_to_gradio(
256
  agent,
257
  task: str,
258
  reset_agent_memory: bool = False,
259
- additional_args: Optional[dict] = None,
260
  ):
261
- """Streams agent responses with improved status indicators."""
 
 
 
 
 
 
 
 
 
262
  try:
263
  # Initial processing indicator
264
  yield gr.ChatMessage(role="assistant", content="⏳ Processing your request...")
@@ -266,447 +347,583 @@ def stream_to_gradio(
266
  # Track what we've yielded to replace the processing indicator
267
  first_message_yielded = False
268
 
269
- for step_log in agent.run(
270
- task, stream=True, reset=reset_agent_memory, additional_args=additional_args
271
- ):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
272
  # pull_messages_from_step is a generator function that yields messages
273
- # We need to iterate through each yielded message
274
  for message in pull_messages_from_step(step_log):
275
  if not first_message_yielded:
276
- # Replace the initial "Processing" message
277
  first_message_yielded = True
278
  message.content = message.content.replace(
279
  "⏳ Processing your request...", ""
280
  )
281
 
282
  # Check message content for document analysis or search references
283
- content_lower = (
284
- message.content.lower() if hasattr(message, "content") else ""
285
- )
286
 
287
- if "document analysis" in content_lower:
288
- message.content = f"📄 **Document Analysis:** {message.content}"
289
- elif "search" in content_lower:
290
- message.content = f"🔍 **Search:** {message.content}"
291
 
292
  yield message
293
 
294
  # Final answer with enhanced formatting
295
- final_answer = handle_agent_output_types(step_log)
296
-
297
- if isinstance(final_answer, AgentText):
298
- yield gr.ChatMessage(
299
- role="assistant",
300
- content=f"✅ **Final Answer:**\n\n{final_answer.to_string()}",
301
- )
302
- else:
303
- yield gr.ChatMessage(
304
- role="assistant", content=f"✅ **Final Answer:** {str(final_answer)}"
305
- )
 
306
 
307
- except Exception as e:
 
308
  yield gr.ChatMessage(
309
  role="assistant",
310
  content=(
311
- f"❌ **Error:** {str(e)}\n\n"
312
- f"Please try again with a different query."
313
  ),
314
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
315
 
316
 
317
  class GradioUI:
 
 
318
  def __init__(self, file_upload_folder=None, max_queue_size=50):
319
- # Initialize all attributes here
 
320
  self.file_upload_folder = file_upload_folder
321
  self.max_queue_size = max_queue_size
322
- self.text_input = None
323
- self.submit_btn = None
324
- self.stop_btn = None
325
- self.clear_btn = None
326
- self.status = None
327
- self.chatbot = None
328
- self.session_state = None
329
  self.job = None
330
 
331
- if self.file_upload_folder is not None:
 
332
  os.makedirs(file_upload_folder, exist_ok=True)
333
 
334
- def interact_with_agent(self, prompt, messages, session_state):
335
- """Main interaction handler with the agent."""
 
336
 
 
 
 
 
 
 
 
 
 
 
337
  # Get or create session-specific agent
338
  if "agent" not in session_state:
339
- session_state["agent"] = create_agent()
 
 
 
 
 
 
 
 
 
340
 
341
- # Adding monitoring
342
  try:
343
  # Log the existence of agent memory
344
  has_memory = hasattr(session_state["agent"], "memory")
345
  print(f"Agent has memory: {has_memory}")
346
- if has_memory:
347
- print(f"Memory type: {type(session_state['agent'].memory)}")
348
 
 
 
 
 
 
 
349
  messages.append(gr.ChatMessage(role="user", content=prompt))
350
  yield messages
351
 
 
352
  for msg in stream_to_gradio(
353
  session_state["agent"], task=prompt, reset_agent_memory=False
354
  ):
355
  messages.append(msg)
356
- yield messages # Yield messages after each step
357
- yield messages # Yield messages one last time
358
 
 
 
 
 
 
 
359
  except Exception as e:
360
  print(f"Error in interaction: {str(e)}")
361
- raise
362
-
363
- def upload_file(
364
- self,
365
- file,
366
- file_uploads_log,
367
- ):
368
- """Handle file uploads with proper validation and security."""
 
 
 
 
 
 
 
369
  if file is None:
370
- return gr.Textbox("No file uploaded", visible=True), file_uploads_log
371
 
372
  try:
373
- mime_type, _ = mimetypes.guess_type(file.name)
374
- except Exception as e:
375
- return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log
376
-
377
- if mime_type not in ALLOWED_FILE_TYPES:
378
- return gr.Textbox("File type disallowed", visible=True), file_uploads_log
379
 
380
- # Sanitize file name
381
- original_name = os.path.basename(file.name)
382
- # Replace invalid chars with underscores
383
- sanitized_name = re.sub(r"[^\w\-.]", "_", original_name)
 
 
 
 
 
 
384
 
385
- # Ensure the extension correlates to the mime type
386
- type_to_ext = {}
387
- for ext, t in mimetypes.types_map.items():
388
- if t not in type_to_ext:
389
- type_to_ext[t] = ext
 
 
390
 
391
- # Build sanitized filename with proper extension
392
- name_parts = sanitized_name.split(".")[:-1]
393
- extension = type_to_ext.get(mime_type, "")
394
- sanitized_name = "".join(name_parts) + extension
 
 
 
 
 
 
 
 
 
 
395
 
396
- # Limit File Size, and Throw Error
397
- max_file_size_mb = 50 # Define the limit
398
- file_size_mb = os.path.getsize(file.name) / (1024 * 1024) # Size in MB
 
399
 
400
- if file_size_mb > max_file_size_mb:
401
  return (
402
- gr.Textbox(
403
- f"File size exceeds {max_file_size_mb} MB limit.", visible=True
404
- ),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
405
  file_uploads_log,
406
  )
407
-
408
- # Save the uploaded file to the specified folder
409
- file_path = os.path.join(self.file_upload_folder, sanitized_name)
410
- shutil.copy(file.name, file_path)
411
-
412
- return gr.Textbox(
413
- f"File uploaded: {file_path}", visible=True
414
- ), file_uploads_log + [file_path]
415
 
416
  def log_user_message(self, text_input, file_uploads_log):
417
- """Process user message and handle file references."""
418
-
419
- cleaned_message = clean(
420
- text_input,
421
- fix_unicode=True,
422
- to_ascii=True,
423
- lower=True,
424
- no_line_breaks=False,
425
- no_urls=False,
426
- no_emails=False,
427
- no_phone_numbers=False,
428
- no_numbers=False,
429
- no_digits=False,
430
- no_currency_symbols=False,
431
- no_punct=False,
432
- lang="en",
433
- ) # Can change default behaviour with TextCleanerTool
434
-
435
- message = cleaned_message # Use the cleaned message
 
 
 
 
 
 
 
 
 
 
 
 
 
 
436
 
 
437
  if file_uploads_log:
438
- # Added file list to message
439
- message += (
440
- f"\nYou have been provided with these files, which might be "
441
- f"helpful or not: {file_uploads_log}"
442
  )
 
443
 
444
  return (
445
  message,
446
  gr.Textbox(
447
  value="",
448
  interactive=False,
449
- placeholder="Processing...", # Changed placeholder.
450
  ),
451
  gr.Button(interactive=False),
452
  )
453
 
454
- def detect_device(self, request: gr.Request):
455
- """Detect whether the user is on mobile or desktop device."""
456
- if not request:
457
- return "Unknown device" # Handle case where request is none.
458
-
459
- # Method 1: Check sec-ch-ua-mobile header
460
- is_mobile_header = request.headers.get("sec-ch-ua-mobile")
461
- if is_mobile_header:
462
- return "Mobile" if "?1" in is_mobile_header else "Desktop"
463
-
464
- # Method 2: Check user-agent string
465
- user_agent = request.headers.get("user-agent", "").lower()
466
- mobile_keywords = ["android", "iphone", "ipad", "mobile", "phone"]
467
-
468
- if any(keyword in user_agent for keyword in mobile_keywords):
469
- return "Mobile"
470
-
471
- # Method 3: Check platform
472
- platform = request.headers.get("sec-ch-ua-platform", "").lower()
473
- if platform:
474
- if platform in ['"android"', '"ios"']:
475
- return "Mobile"
476
- return "Desktop"
477
-
478
- # Default case if no clear indicators
479
- return "Desktop"
480
 
481
- def launch(self, **kwargs):
482
- """Launch the Gradio UI with responsive layout."""
 
 
 
 
 
483
  with gr.Blocks(theme="ocean", fill_height=True) as demo:
484
- # Different layouts for mobile and computer devices
485
- @gr.render()
486
- def layout(request: gr.Request):
487
- device = self.detect_device(request)
488
- print(f"device - {device}")
489
- # Render layout with sidebar
490
- if device == "Desktop":
491
- return self._create_desktop_layout()
492
- else:
493
- return self._create_mobile_layout()
494
-
495
- demo.queue(max_size=20).launch(
496
- debug=True, **kwargs
497
- ) # Add queue with reasonable size
498
-
499
- def _create_desktop_layout(self):
500
- """Create the desktop layout with sidebar."""
501
- with gr.Blocks(fill_height=True) as sidebar_demo:
502
- with gr.Sidebar():
503
- gr.Markdown(
504
- """#OpenDeepResearch - 3theSmolagents!
505
- Model_id: anthropic/claude-3.7-sonnet"""
506
- )
507
- with gr.Group():
508
- gr.Markdown("**What's on your mind mate?**", container=True)
509
- text_input = gr.Textbox(
510
- lines=3,
511
- label="Your request",
512
- container=False,
513
- placeholder=(
514
- "Enter your prompt here and press Shift+Enter or "
515
- "press the button"
516
- ),
517
  )
518
- launch_research_btn = gr.Button("Run", variant="primary")
519
 
520
- # If an upload folder is provided, enable the upload feature
521
- if self.file_upload_folder is not None:
522
- upload_file = gr.File(label="Upload a file")
523
- upload_status = gr.Textbox(
524
- label="Upload Status", interactive=False, visible=False
525
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
526
  file_uploads_log = gr.State([])
527
- upload_file.change(
528
- self.upload_file,
529
- [upload_file, file_uploads_log],
530
- [upload_status, file_uploads_log],
531
- )
532
 
533
- gr.HTML("<br><br><h4><center>Powered by:</center></h4>")
534
- with gr.Row():
535
- gr.HTML(
536
- """
537
- <div style="display: flex; align-items: center; gap: 8px;
538
- font-family: system-ui, -apple-system, sans-serif;">
539
- <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png"
540
- style="width: 32px; height: 32px; object-fit: contain;"
541
- alt="logo">
542
- <a target="_blank" href="https://github.com/huggingface/smolagents">
543
- <b>huggingface/smolagents</b>
544
- </a>
545
- </div>
546
- """
547
  )
548
 
549
- # Add session state to store session-specific data
550
- # Initialize empty state for each session
551
- session_state = gr.State({})
552
- stored_messages = gr.State([])
553
- if "file_uploads_log" not in locals():
554
- file_uploads_log = gr.State([])
555
-
556
- chatbot = gr.Chatbot(
557
- label="ODR",
558
- type="messages",
559
- avatar_images=(
560
- None,
561
- "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
562
- ),
563
- resizeable=False,
564
- scale=1,
565
- elem_id="my-chatbot",
566
- )
567
-
568
- self._connect_event_handlers(
569
- text_input,
570
- launch_research_btn,
571
- file_uploads_log,
572
- stored_messages,
573
- chatbot,
574
- session_state,
575
- )
576
-
577
- return sidebar_demo
578
-
579
- def _create_mobile_layout(self):
580
- """Create the mobile layout (simpler without sidebar)."""
581
- with gr.Blocks(fill_height=True) as simple_demo:
582
- gr.Markdown("""#OpenDeepResearch - free the AI agents!""")
583
- # Add session state to store session-specific data
584
- session_state = gr.State({})
585
- stored_messages = gr.State([])
586
- file_uploads_log = gr.State([])
587
-
588
- chatbot = gr.Chatbot(
589
- label="ODR",
590
- type="messages",
591
- avatar_images=(
592
- None,
593
- "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
594
- ),
595
- resizeable=True,
596
- scale=1,
597
- )
598
-
599
- # If an upload folder is provided, enable the upload feature
600
- if self.file_upload_folder is not None:
601
- upload_file = gr.File(label="Upload a file")
602
- upload_status = gr.Textbox(
603
- label="Upload Status", interactive=False, visible=False
604
- )
605
- upload_file.change(
606
- self.upload_file,
607
- [upload_file, file_uploads_log],
608
- [upload_status, file_uploads_log],
609
- )
610
-
611
- text_input = gr.Textbox(
612
- lines=1,
613
- label="What's on your mind mate?",
614
- placeholder="Chuck in a question and we'll take care of the rest",
615
- )
616
- launch_research_btn = gr.Button("Run", variant="primary")
617
 
618
- self._connect_event_handlers(
619
- text_input,
620
- launch_research_btn,
621
- file_uploads_log,
622
- stored_messages,
623
- chatbot,
624
- session_state,
625
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
626
 
627
- return simple_demo
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
628
 
629
- def _create_common_ui_elements(self):
630
- """Create common UI elements with control buttons."""
631
- with gr.Group():
632
- self.text_input = gr.Textbox(
633
- lines=3,
634
- label="Your request",
635
- placeholder="Enter your question about the documents...",
636
- elem_classes=["prompt-box"],
637
- )
638
 
639
- with gr.Row():
640
- self.submit_btn = gr.Button("Run", variant="primary")
641
- self.stop_btn = gr.Button("Stop Generation", variant="stop")
642
- self.clear_btn = gr.Button("Clear Chat", variant="secondary")
 
 
 
 
 
 
 
 
643
 
644
- # Status indicator for document processing
645
- self.status = gr.Textbox(
646
- "", label="Status", interactive=False, visible=True
 
 
 
 
 
 
 
 
647
  )
648
 
649
- def _connect_event_handlers(
650
- self,
651
- text_input,
652
- launch_research_btn,
653
- file_uploads_log,
654
- stored_messages,
655
- chatbot,
656
- session_state,
657
- ):
658
- """Connect event handlers with appropriate parameters."""
659
- # Define the job handler for stopping generation
660
- self.job = None
661
-
662
- def start_processing(prompt, chat_history):
663
- # We'll use the passed components directly rather than self.status
664
- return prompt, chat_history
665
-
666
- def stop_generation():
667
- if self.job:
668
- self.job.cancel()
669
-
670
- def clear_chat():
671
- return [], gr.Textbox(interactive=True), gr.Button(interactive=True), ""
672
-
673
- # Connect text input submission
674
- text_input.submit(
675
- self.log_user_message,
676
- [text_input, file_uploads_log],
677
- [stored_messages, text_input, launch_research_btn],
678
- ).then(
679
- self.interact_with_agent,
680
- [stored_messages, chatbot, session_state],
681
- [chatbot],
682
- )
683
-
684
- # Connect button click
685
- launch_research_btn.click(
686
- self.log_user_message,
687
- [text_input, file_uploads_log],
688
- [stored_messages, text_input, launch_research_btn],
689
- ).then(
690
- self.interact_with_agent,
691
- [stored_messages, chatbot, session_state],
692
- [chatbot],
693
- )
694
-
695
- # Store the job for cancellation if needed
696
- self.job = None # This would need to be assigned to an actual event
697
-
698
 
699
  def main():
700
- """Main entry point for the application."""
701
- # Initialize environment
702
- setup_environment()
703
-
704
- # Ensure downloads folder exists
705
- os.makedirs(f"./{BROWSER_CONFIG['downloads_folder']}", exist_ok=True)
706
-
707
- # Launch UI
708
- GradioUI(file_upload_folder="uploaded_files").launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
709
 
710
 
711
  if __name__ == "__main__":
712
- main()
 
 
1
  """Main application for the OpenDeepResearch Gradio interface."""
2
 
3
+ import sys
4
  import mimetypes
5
+ import traceback
6
+ from dataclasses import dataclass
7
  import os
8
  import re
9
  import shutil
10
+ import time
11
+ from typing import Optional, Dict, Any
12
+ from datetime import datetime
13
 
14
  from cleantext import clean
15
  from dotenv import load_dotenv
 
42
  from smolagents.gradio_ui import pull_messages_from_step, handle_agent_output_types
43
 
44
 
45
+ # Constants and configurations - Converted to UPPER_CASE
46
  AUTHORIZED_IMPORTS = [
47
  "requests", # Web requests (fetching data from the internet)
48
  "zipfile", # Working with ZIP archives
 
86
  "selenium", # Automated browser control (for dynamic websites)
87
  # Database interaction (if needed) - Handle credentials securely!
88
  "sqlite3", # SQLite database access
 
89
  # Task scheduling
90
  "schedule", # Allow the agent to schedule tasks
 
 
91
  ]
92
 
93
 
 
112
  "application/pdf",
113
  "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
114
  "text/plain",
115
+ "text/markdown",
116
+ "application/json",
117
  "image/png",
118
  "image/webp",
119
+ "image/jpeg",
120
+ "image/gif",
121
  "video/mp4",
122
+ "audio/mpeg",
123
+ "audio/wav",
124
+ "audio/ogg",
125
  ]
126
 
127
+ # Maximum chat history length to prevent memory issues
128
+ MAX_CHAT_HISTORY = 100
129
+ # Maximum uploaded file size in MB
130
+ MAX_FILE_SIZE_MB = 50
131
+ # File cleanup schedule (in days)
132
+ FILE_RETENTION_DAYS = 7
133
+
134
 
135
def setup_environment():
    """
    Initialize environment variables and authentication.

    Loads variables from a local .env file (overriding the process
    environment) and attempts Hugging Face Hub login with HF_TOKEN.

    Returns:
        bool: True if setup was successful, False otherwise
    """
    load_dotenv(override=True)
    hf_token = os.getenv("HF_TOKEN")

    # Guard clause: nothing to do without a token.
    if not hf_token:
        print("HF_TOKEN not found in environment variables.")
        return False

    try:
        login(hf_token)
        print("HF_TOKEN (last 10 characters):", hf_token[-10:])
        return True
    except (ValueError, ConnectionError) as e:  # More specific exceptions
        print(f"Failed to login with HF token: {e}")
        return False
154
 
155
 
156
class ModelManager:
    """Factory for constructing LLM backends by inference type."""

    @staticmethod
    def load_model(chosen_inference: str, model_id: str, key_manager=None):
        """
        Load the specified model with appropriate configuration.

        Args:
            chosen_inference: Type of inference to use; one of "hf_api",
                "hf_api_provider", "litellm", "openai", "transformers"
            model_id: ID of the model to load
            key_manager: Optional key manager for API keys
                (required for "openai")

        Returns:
            Model instance

        Raises:
            ValueError: If inference type is invalid or required parameters
                are missing
        """
        if chosen_inference == "hf_api":
            return HfApiModel(model_id=model_id)
        if chosen_inference == "hf_api_provider":
            return HfApiModel(provider="together")
        if chosen_inference == "litellm":
            return LiteLLMModel(model_id=model_id)
        if chosen_inference == "openai":
            if not key_manager:
                raise ValueError("Key manager required for OpenAI model")
            return OpenAIServerModel(
                model_id=model_id, api_key=key_manager.get_key("openai_api_key")
            )
        if chosen_inference == "transformers":
            # Hub repo IDs are case-sensitive: the lowercased form
            # "huggingfacetb/smollm2-1.7b-instruct" fails to resolve.
            return TransformersModel(
                model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct",
                device_map="auto",
                max_new_tokens=1000,
            )
        raise ValueError(f"Invalid inference type: {chosen_inference}")
192
 
193
 
194
+ # This class only has one public method, but that's acceptable for a registry class
195
+ # whose purpose is to provide factory methods
196
  class ToolRegistry:
197
  """Manages tool initialization and organization."""
198
 
199
  @staticmethod
200
  def load_web_tools(model, browser, text_limit=20000):
201
+ """
202
+ Initialize and return web-related tools.
203
+ Args:
204
+ model: LLM model for text inspector
205
+ browser: Browser instance for web tools
206
+ text_limit: Maximum text length for processing
207
+ Returns:
208
+ List of web tools
209
+ """
210
  return [
211
  GoogleSearchTool(provider="serper"),
212
  VisitTool(browser),
 
220
 
221
  @staticmethod
222
  def load_image_generation_tools():
223
+ """
224
+ Initialize and return image generation tools.
225
+ Returns:
226
+ Image generation tool
227
+ Raises:
228
+ RuntimeError: If tool initialization fails
229
+ """
230
  try:
231
  return Tool.from_space(
232
+ space_id="xkerser/flux.1-dev",
233
  name="image_generator",
234
  description=(
235
  "Generates high-quality AgentImage. "
236
+ "With text prompt (77 token limit)."
237
  ),
238
  )
239
+ except (
240
+ ConnectionError,
241
+ ValueError,
242
+ RuntimeError,
243
+ ) as e: # More specific exceptions
244
+ print(f" Couldn't initialize image generation tool: {e}")
245
+ raise RuntimeError(f"Image generation tool initialization failed: {e}")
246
 
247
  @staticmethod
248
  def load_clean_text_tool():
249
+ """
250
+ Initialize and return text cleaning tool.
251
+ Returns:
252
+ Text cleaning tool
253
+ Raises:
254
+ RuntimeError: If tool initialization fails
255
+ """
256
  try:
257
  return TextCleanerTool()
258
+ except (ValueError, RuntimeError) as e: # More specific exceptions
259
+ print(f" Couldn't initialize clean text tool: {e}")
260
+ raise RuntimeError(f"Clean text tool initialization failed: {e}")
261
 
262
 
263
def create_agent():
    """
    Creates a fresh agent instance with properly configured tools.

    Returns:
        CodeAgent: Configured agent ready for use

    Raises:
        RuntimeError: If tool validation or agent creation fails (any
            internal ValueError is wrapped into RuntimeError below)
    """
    try:
        # Initialize model
        model = LiteLLMModel(
            custom_role_conversions=CUSTOM_ROLE_CONVERSIONS,
            model_id="openrouter/deepseek/deepseek-chat-v3-0324:free",
        )

        # Initialize tools
        text_limit = 30000
        browser = SimpleTextBrowser(**BROWSER_CONFIG)

        # Collect all tools in a single list
        web_tools = ToolRegistry.load_web_tools(model, browser, text_limit)
        image_generator = ToolRegistry.load_image_generation_tools()
        clean_text = TextCleanerTool()

        # Combine all tools into a single list
        all_tools = [visualizer] + web_tools + [image_generator, clean_text]

        # Validate tools before creating agent
        for tool in all_tools:
            if not isinstance(tool, Tool):
                raise ValueError(
                    f"Invalid tool type: {type(tool)}. "
                    f"All tools must be instances of Tool class."
                )

        return CodeAgent(
            model=model,
            tools=all_tools,
            max_steps=12,
            verbosity_level=2,
            additional_authorized_imports=AUTHORIZED_IMPORTS,
            planning_interval=4,
        )
    except (ValueError, RuntimeError) as e:
        print(f"Failed to create agent: {e}")
        # Chain the cause so the original traceback is preserved.
        raise RuntimeError(f"Agent creation failed: {e}") from e
310
+
311
+
312
# Define standalone functions outside of classes
def process_message_content(content_lower: str) -> Dict[str, bool]:
    """
    Classify a message by scanning its lowercased content for keywords.

    Args:
        content_lower: Lowercase message content

    Returns:
        Dictionary with message type flags
    """
    keyword_by_flag = {
        "is_document_analysis": "document analysis",
        "is_search": "search",
        "is_error": "error",
    }
    return {
        flag: keyword in content_lower
        for flag, keyword in keyword_by_flag.items()
    }
326
 
327
def stream_to_gradio(
    agent,
    task: str,
    reset_agent_memory: bool = False,
    additional_args: Optional[Dict] = None,
):
    """
    Streams agent responses with improved status indicators.

    Args:
        agent: The agent instance to use
        task: The task to perform
        reset_agent_memory: Whether to reset agent memory
        additional_args: Optional additional arguments

    Yields:
        Gradio ChatMessage objects
    """
    try:
        # Initial processing indicator
        yield gr.ChatMessage(role="assistant", content="⏳ Processing your request...")

        # Track what we've yielded to replace the processing indicator
        first_message_yielded = False

        # Iterate the agent's stream LAZILY: wrapping it in list() would
        # block until the agent finishes entirely, defeating streaming.
        # Keep a reference to the last step for the final-answer handling.
        last_step = None
        for step_log in agent.run(
            task,
            stream=True,
            reset=reset_agent_memory,
            additional_args=additional_args,
        ):
            last_step = step_log
            # pull_messages_from_step is a generator function that yields messages
            for message in pull_messages_from_step(step_log):
                if not first_message_yielded:
                    # Replace the initial "processing" message
                    first_message_yielded = True
                    message.content = message.content.replace(
                        "⏳ Processing your request...", ""
                    )

                # Check message content for document analysis or search references
                if hasattr(message, "content") and message.content:
                    content_lower = message.content.lower()
                    message_types = process_message_content(content_lower)

                    if message_types["is_document_analysis"]:
                        message.content = f"📄 **Document Analysis:** {message.content}"
                    elif message_types["is_search"]:
                        message.content = f"🔍 **Search:** {message.content}"

                yield message

        # If no steps were produced, handle it gracefully
        if last_step is None:
            yield gr.ChatMessage(
                role="assistant", content="⚠️ No response from agent. Please try again."
            )
            return

        # Final answer with enhanced formatting, derived from the last step
        final_answer = handle_agent_output_types(last_step)
        if isinstance(final_answer, AgentText):
            yield gr.ChatMessage(
                role="assistant",
                content=f"✅ **Final Answer:**\n{final_answer.to_string()}",
            )
        else:
            yield gr.ChatMessage(
                role="assistant",
                content=f"✅ **Final Answer:** {str(final_answer)}",
            )

    except (ValueError, RuntimeError) as e:
        # More specific error handling
        yield gr.ChatMessage(
            role="assistant",
            content=(
                f"❌ **Error:** {str(e)}\n" f"Please try again with a different query."
            ),
        )
    except Exception as e:  # Fallback for truly unexpected errors
        print(f"Unexpected error in stream_to_gradio: {e}")
        traceback.print_exc()
        yield gr.ChatMessage(
            role="assistant",
            content=(
                "❌ **Unexpected Error:** An unknown error occurred.\n"
                "Please try again or contact support if the issue persists."
            ),
        )
422
+
423
+
424
# This is a helper method that can be called statically
def cleanup_old_files(directory: str, days: int = FILE_RETENTION_DAYS):
    """
    Removes files older than the specified number of days.

    Args:
        directory: Directory to clean up
        days: Number of days to keep files
    """
    if not os.path.exists(directory):
        return

    cutoff_time = time.time() - days * 86400  # 86400 seconds per day
    for filename in os.listdir(directory):
        file_path = os.path.join(directory, filename)
        # Skip subdirectories and anything else that is not a regular file.
        if not os.path.isfile(file_path):
            continue
        # Keep files modified at or after the cutoff.
        if os.path.getmtime(file_path) >= cutoff_time:
            continue
        try:
            os.remove(file_path)
            print(f"Deleted old file: {file_path}")
        except (PermissionError, OSError) as e:
            print(f"Failed to delete {file_path}: {str(e)}")
446
+
447
+
448
@dataclass
class UIComponents:
    """Container for UI components to reduce main class attribute count.

    All fields default to None and are populated by GradioUI.launch();
    file_uploader/upload_status stay None when no upload folder is set.
    """

    # Query input textbox
    text_input: Any = None
    # Run / Stop / Clear buttons
    submit_btn: Any = None
    stop_btn: Any = None
    clear_btn: Any = None
    # Status textbox shown next to the chat
    status: Any = None
    # Main chat display
    chatbot: Any = None
    # File-upload widget (renamed from upload_file to avoid conflict)
    file_uploader: Any = None
    # Textbox reporting upload results
    upload_status: Any = None
460
 
461
 
462
class GradioUI:
    """Gradio user interface for the OpenDeepResearch application.

    Builds the widget layout (sidebar + chat column), wires the event
    handlers, and delegates agent work to ``create_agent`` /
    ``stream_to_gradio``.
    """

    def __init__(self, file_upload_folder=None, max_queue_size=50):
        """Initialize the Gradio UI.

        Args:
            file_upload_folder: Directory for uploaded files; falsy values
                (None or "") disable the upload widget entirely.
            max_queue_size: Maximum size of the Gradio request queue.
        """
        # Basic configuration
        self.file_upload_folder = file_upload_folder
        self.max_queue_size = max_queue_size
        self.max_chat_history = MAX_CHAT_HISTORY
        self.max_file_size_mb = MAX_FILE_SIZE_MB

        # Container for widget references; populated in launch().
        self.components = UIComponents()

        # Job handle for cancellation
        self.job = None

        # Create the upload directory and purge stale files.  A single
        # truthiness check covers both None and "" (the previous code
        # used `is not None` for makedirs, so an empty string reached
        # os.makedirs("") and raised).
        if file_upload_folder:
            os.makedirs(file_upload_folder, exist_ok=True)
            cleanup_old_files(file_upload_folder)

    def interact_with_agent(self, prompt, messages, session_state):
        """
        Main interaction handler with the agent.

        Args:
            prompt: User input prompt
            messages: Current message history
            session_state: Session state dictionary (holds the per-session agent)

        Yields:
            Updated message history after each streamed agent message
        """
        # Get or create session-specific agent
        if "agent" not in session_state:
            try:
                session_state["agent"] = create_agent()
            except RuntimeError as e:
                messages.append(
                    gr.ChatMessage(
                        role="assistant", content=f"Failed to create agent: {str(e)}"
                    )
                )
                yield messages
                return

        try:
            # Log the existence of agent memory (debug aid only)
            has_memory = hasattr(session_state["agent"], "memory")
            print(f"Agent has memory: {has_memory}")
            if has_memory and hasattr(session_state["agent"].memory, "steps"):
                print(f"Memory steps: {len(session_state['agent'].memory.steps)}")

            # Truncate messages if they exceed the maximum
            if len(messages) > self.max_chat_history:
                # Keep only the latest messages
                messages = messages[-self.max_chat_history :]

            # Add user message
            messages.append(gr.ChatMessage(role="user", content=prompt))
            yield messages

            # Process with agent and stream responses; memory is preserved
            # across turns (reset_agent_memory=False).
            for msg in stream_to_gradio(
                session_state["agent"], task=prompt, reset_agent_memory=False
            ):
                messages.append(msg)
                yield messages

        except ValueError as e:
            print(f"Value error in interaction: {str(e)}")
            messages.append(
                gr.ChatMessage(role="assistant", content=f"Input error: {str(e)}")
            )
            yield messages
        except Exception as e:
            # Last-resort handler so the UI always gets a reply.
            print(f"Error in interaction: {str(e)}")
            traceback.print_exc()
            messages.append(
                gr.ChatMessage(role="assistant", content=f"Error occurred: {str(e)}")
            )
            yield messages

    def handle_file_upload(self, file, file_uploads_log):
        """
        Handle file uploads with proper validation and security.

        Validates existence, size and MIME type, sanitizes the file name,
        appends a timestamp for uniqueness, and copies the file into
        ``self.file_upload_folder``.

        Args:
            file: File to upload (Gradio file object with a ``.name`` path)
            file_uploads_log: List of uploaded file paths

        Returns:
            Tuple of (status textbox, updated file_uploads_log)
        """
        if file is None:
            return gr.Textbox(value="No file uploaded", visible=True), file_uploads_log

        try:
            # Validate file exists
            if not os.path.exists(file.name):
                return (
                    gr.Textbox(value="File not found", visible=True),
                    file_uploads_log,
                )

            # Check file size
            file_size_mb = os.path.getsize(file.name) / (1024 * 1024)
            if file_size_mb > self.max_file_size_mb:
                return (
                    gr.Textbox(
                        value=f"File size exceeds {self.max_file_size_mb} MB limit.",
                        visible=True,
                    ),
                    file_uploads_log,
                )

            # Validate mime type (guessed from the extension)
            mime_type, _ = mimetypes.guess_type(file.name)
            if mime_type not in ALLOWED_FILE_TYPES:
                return (
                    gr.Textbox(value="File type disallowed", visible=True),
                    file_uploads_log,
                )

            # Sanitize file name: replace invalid chars with underscores
            original_name = os.path.basename(file.name)
            sanitized_name = re.sub(r"[^\w\-.]", "_", original_name)
            # Add timestamp to ensure uniqueness
            timestamp = datetime.now().strftime("%y%m%d_%H%M%S")
            name_parts = os.path.splitext(sanitized_name)
            sanitized_name = f"{name_parts[0]}_{timestamp}{name_parts[1]}"

            # Save the uploaded file to the specified folder
            file_path = os.path.join(self.file_upload_folder, sanitized_name)
            shutil.copy(file.name, file_path)

            return (
                gr.Textbox(value=f"File uploaded: {file_path}", visible=True),
                file_uploads_log + [file_path],
            )

        except FileNotFoundError as e:
            return (
                gr.Textbox(value=f"File not found: {str(e)}", visible=True),
                file_uploads_log,
            )
        except PermissionError as e:
            return (
                gr.Textbox(value=f"Permission denied: {str(e)}", visible=True),
                file_uploads_log,
            )
        except (IOError, OSError) as e:
            return (
                gr.Textbox(value=f"I/O error during upload: {str(e)}", visible=True),
                file_uploads_log,
            )
        except Exception as e:
            # For truly unexpected errors, log with more detail
            print(f"Unexpected upload error: {e}")
            traceback.print_exc()
            return (
                gr.Textbox(value=f"Error processing upload: {str(e)}", visible=True),
                file_uploads_log,
            )

    def log_user_message(self, text_input, file_uploads_log):
        """
        Process user message and handle file references.

        Args:
            text_input: User's text input
            file_uploads_log: List of uploaded files

        Returns:
            Tuple of (processed message, reset text input, disabled submit button)
        """
        if not text_input.strip():
            return (
                "",
                gr.Textbox(value="", interactive=True),
                gr.Button(interactive=True),
            )

        # Only clean if necessary (avoid unnecessary processing).
        # NOTE(review): the trigger set includes "-", so any hyphenated
        # input goes through cleantext — confirm that is intentional.
        message = text_input
        if any(char in text_input for char in "€¥£-"):
            message = clean(
                text_input,
                fix_unicode=True,
                to_ascii=True,
                lower=False,  # Keep original case
                no_line_breaks=False,
                no_urls=False,
                no_emails=False,
                no_phone_numbers=False,
                no_numbers=False,
                no_digits=False,
                no_currency_symbols=False,
                no_punct=False,
                lang="en",
            )

        # Add file references if any
        if file_uploads_log:
            files_info = "\n".join(
                [f"- {os.path.basename(f)}" for f in file_uploads_log]
            )
            message += f"\nYou have been provided with these files:\n{files_info}"

        return (
            message,
            gr.Textbox(
                value="",
                interactive=False,
                placeholder="Processing your request...",
            ),
            gr.Button(interactive=False),
        )

    def clear_chat(self):
        """
        Clear the chat history and reset UI elements.

        Returns:
            Tuple of (empty chat history, empty stored messages,
            interactive text input, interactive button, hidden status)
        """
        return (
            [],  # Empty chat history
            [],  # Empty stored messages
            gr.Textbox(value="", interactive=True),
            gr.Button(interactive=True),
            gr.Textbox(value="", visible=False),  # Clear status
        )

    def launch(self, share=False, **kwargs):
        """
        Launch the Gradio UI with responsive layout.

        Args:
            share: Whether to create a public link
            **kwargs: Additional keyword arguments for launch
        """
        with gr.Blocks(theme="ocean", fill_height=True) as demo:
            # Use Gradio's built-in responsive layout
            with gr.Row():
                # Sidebar (smaller on mobile)
                with gr.Column(scale=1, min_width=100):
                    gr.Markdown(
                        """# OpenDeepResearch
AI-powered research assistant using SmoLAgents
Model: deepseek/deepseek-chat-v3-0324:free"""
                    )

                    with gr.Group():
                        gr.Markdown("**Research Query**", container=True)
                        self.components.text_input = gr.Textbox(
                            lines=3,
                            label="Your request",
                            placeholder="Enter your research question or task",
                            container=False,
                        )

                        with gr.Row():
                            self.components.submit_btn = gr.Button(
                                "Run", variant="primary"
                            )
                            self.components.stop_btn = gr.Button("Stop", variant="stop")
                            self.components.clear_btn = gr.Button(
                                "Clear", variant="secondary"
                            )

                    # File upload in collapsible section (only when an
                    # upload folder was configured).
                    if self.file_upload_folder is not None:
                        with gr.Accordion("Upload Files", open=False):
                            # NOTE(review): type="file" is a Gradio 3.x value
                            # (4.x expects "filepath"/"binary") — confirm the
                            # pinned gradio version.
                            self.components.file_uploader = gr.File(
                                label="Upload a file",
                                file_types=["pdf", "docx", "txt", "md", "json"],
                                type="file",
                            )
                            self.components.upload_status = gr.Textbox(
                                label="Upload status", interactive=False, visible=False
                            )

                    # Tool information
                    with gr.Accordion("Available Tools", open=False):
                        gr.Markdown(
                            """
- **Web Search**: Find information online
- **Document Analysis**: Analyze uploaded documents
- **Text Cleaning**: Format and clean text
- **Image Generation**: Create images from descriptions
"""
                        )

                    gr.HTML("<br><h5>Powered by:</h5>")
                    with gr.Row():
                        gr.HTML(
                            """
<div style="display: flex; align-items: center; gap: 8px;
font-family: system-ui, -apple-system, sans-serif;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png"
style="width: 32px; height: 32px; object-fit: contain;"
alt="logo">
<a target="_blank" href="https://github.com/huggingface/smolagents">
<b>huggingface/smolagents</b>
</a>
</div>
"""
                        )

                # Main chat area (larger)
                with gr.Column(scale=3, min_width=500):
                    # Session-scoped state: per-user agent, chat log, uploads
                    session_state = gr.State({})
                    stored_messages = gr.State([])
                    file_uploads_log = gr.State([])

                    # Chat interface
                    self.components.chatbot = gr.Chatbot(
                        label="Research Assistant",
                        type="messages",
                        avatar_images=(
                            None,
                            "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
                        ),
                        height=600,
                        elem_id="research-chatbot",
                    )

                    # Status indicator
                    self.components.status = gr.Textbox(
                        "", label="Status", interactive=False, visible=False
                    )

            # --- Event wiring ---------------------------------------------
            # File upload handler.  BUGFIX: UIComponents is a dataclass whose
            # fields always exist, so the previous hasattr() checks were
            # always True and `.change()` was invoked on None whenever no
            # upload folder was configured (AttributeError at build time).
            # Test the component values against None instead.
            if (
                self.components.file_uploader is not None
                and self.components.upload_status is not None
            ):
                self.components.file_uploader.change(
                    self.handle_file_upload,
                    [self.components.file_uploader, file_uploads_log],
                    [self.components.upload_status, file_uploads_log],
                )

            # Text input handler: log message -> run agent -> re-enable UI
            submit_event = (
                self.components.text_input.submit(
                    self.log_user_message,
                    [self.components.text_input, file_uploads_log],
                    [
                        stored_messages,
                        self.components.text_input,
                        self.components.submit_btn,
                    ],
                )
                .then(
                    self.interact_with_agent,
                    [stored_messages, self.components.chatbot, session_state],
                    [self.components.chatbot],
                )
                .then(
                    lambda: (
                        gr.Textbox(interactive=True),
                        gr.Button(interactive=True),
                    ),
                    None,
                    [self.components.text_input, self.components.submit_btn],
                )
            )

            # Button click handler with same flow
            click_event = (
                self.components.submit_btn.click(
                    self.log_user_message,
                    [self.components.text_input, file_uploads_log],
                    [
                        stored_messages,
                        self.components.text_input,
                        self.components.submit_btn,
                    ],
                )
                .then(
                    self.interact_with_agent,
                    [stored_messages, self.components.chatbot, session_state],
                    [self.components.chatbot],
                )
                .then(
                    lambda: (
                        gr.Textbox(interactive=True),
                        gr.Button(interactive=True),
                    ),
                    None,
                    [self.components.text_input, self.components.submit_btn],
                )
            )

            # Stop button cancels ongoing operations
            self.components.stop_btn.click(
                None, None, None, cancels=[submit_event, click_event]
            )

            # Clear button resets chat, stored messages, input, and status
            self.components.clear_btn.click(
                self.clear_chat,
                None,
                [
                    self.components.chatbot,
                    stored_messages,
                    self.components.text_input,
                    self.components.submit_btn,
                    self.components.status,
                ],
            )

        # Launch with enhanced queue settings.
        # NOTE(review): queue(concurrency_count=..., status_update_rate=...)
        # is Gradio 3.x API (removed in 4.x) — confirm the pinned version.
        demo.queue(
            max_size=self.max_queue_size,
            concurrency_count=3,  # Allow multiple concurrent requests
            status_update_rate=10,  # Update status more frequently
        ).launch(
            share=share,
            debug=True,
            # Skip SSL verification only for explicit local-port runs.
            # NOTE(review): "local_port" is also forwarded via **kwargs to
            # launch() — confirm it is a valid launch() argument.
            ssl_verify=not kwargs.get("local_port"),
            **kwargs,
        )
888
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
889
 
890
def main():
    """
    Main entry point for the application.

    Sets up the environment, ensures the working folders exist, and
    launches the Gradio UI.

    Returns:
        int: Exit code (0 for success, 1 for failure)
    """
    try:
        # Initialize environment; bail out early on failure.
        if not setup_environment():
            print("Failed to set up environment properly.")
            return 1

        # Ensure downloads folder exists
        downloads_folder = BROWSER_CONFIG["downloads_folder"]
        os.makedirs(f"./{downloads_folder}", exist_ok=True)

        # Create uploads folder
        uploads_folder = "uploaded_files"
        os.makedirs(uploads_folder, exist_ok=True)

        # Launch UI
        print("Starting OpenDeepResearch Gradio interface...")
        GradioUI(file_upload_folder=uploads_folder).launch()
        return 0

    except KeyError as e:
        # Missing configuration key (e.g. in BROWSER_CONFIG)
        print(f"Configuration error: Missing key {e}")
        traceback.print_exc()
        return 1
    except Exception as e:
        print(f"Application failed to start: {e}")
        traceback.print_exc()
        return 1
+ return 1
925
 
926
 
927
if __name__ == "__main__":
    # Propagate main()'s exit code to the shell.
    sys.exit(main())