mknolan committed on
Commit
04de987
·
verified ·
1 Parent(s): f8ebad0

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +358 -8
app.py CHANGED
@@ -19,6 +19,123 @@ from pdf2image import convert_from_path, convert_from_bytes
19
  import tempfile
20
  import logging
21
  import traceback
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
  # Constants
24
  IMAGENET_MEAN = (0.485, 0.456, 0.406)
@@ -35,13 +152,19 @@ os.makedirs(OUTPUT_DIR, exist_ok=True)
35
  # Set up logging to write to saved_outputs directory
36
  log_file = os.path.join(OUTPUT_DIR, f"debug_log_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.log")
37
 
 
 
 
 
 
38
  # Configure logging
39
  logging.basicConfig(
40
  level=logging.DEBUG,
41
  format='%(asctime)s [%(levelname)s] %(message)s',
42
  handlers=[
43
  logging.FileHandler(log_file),
44
- logging.StreamHandler(sys.stdout)
 
45
  ]
46
  )
47
 
@@ -580,6 +703,7 @@ def analyze_with_prompt(image_input, prompt):
580
 
581
  img = img.convert('RGB')
582
 
 
583
  # Get raw analysis from model
584
  result = process_image_with_text(img, prompt)
585
  results.append(result)
@@ -1216,6 +1340,11 @@ def analyze_folder_images(folder_path, prompt):
1216
  # Function to process an image with text prompt
1217
  def process_image_with_text(image, prompt):
1218
  """Process a single image with the InternVL model and a text prompt."""
 
 
 
 
 
1219
  try:
1220
  logger.info(f"process_image_with_text called with image type: {type(image)}")
1221
 
@@ -1299,8 +1428,13 @@ def process_image_with_text(image, prompt):
1299
 
1300
  logger.info(f"Final tensor prepared: shape={tensor.shape}, device={tensor.device}, dtype={tensor.dtype}")
1301
  except Exception as tensor_err:
1302
- logger.error(f"Error in tensor creation: {str(tensor_err)}")
 
1303
  logger.error(traceback.format_exc())
 
 
 
 
1304
  return f"Error preparing image for analysis: {str(tensor_err)}"
1305
 
1306
  # Process the prompt
@@ -1332,10 +1466,19 @@ def process_image_with_text(image, prompt):
1332
  output = tokenizer.decode(output_ids[0], skip_special_tokens=True)
1333
  logger.debug(f"Decoded output length: {len(output)} chars")
1334
 
 
 
 
 
1335
  return output.strip()
1336
  except Exception as gen_error:
1337
- logger.error(f"Direct generation failed: {str(gen_error)}")
 
1338
  logger.error(traceback.format_exc())
 
 
 
 
1339
 
1340
  # Approach 2: Try the chat method
1341
  try:
@@ -1360,10 +1503,19 @@ def process_image_with_text(image, prompt):
1360
  logger.info("Chat method successful")
1361
  logger.debug(f"Chat response length: {len(response)} chars")
1362
 
 
 
 
 
1363
  return response.strip()
1364
  except Exception as chat_error:
1365
- logger.error(f"Chat method failed: {str(chat_error)}")
 
1366
  logger.error(traceback.format_exc())
 
 
 
 
1367
 
1368
  # Approach 3: Try direct model forward pass
1369
  try:
@@ -1399,22 +1551,47 @@ def process_image_with_text(image, prompt):
1399
  response = tokenizer.decode(pred_ids[0], skip_special_tokens=True)
1400
  logger.debug(f"Decoded response length: {len(response)} chars")
1401
 
 
 
 
 
1402
  return response.strip()
1403
  else:
1404
- logger.error("Model output does not contain logits")
 
 
 
 
1405
  return "Failed to analyze image - model output contains no usable data"
1406
  else:
1407
- logger.error("Model does not have forward method")
 
 
 
 
1408
  return "Failed to analyze image - model doesn't support direct calling"
1409
  except Exception as forward_error:
1410
- logger.error(f"Forward method failed: {str(forward_error)}")
 
1411
  logger.error(traceback.format_exc())
1412
 
 
 
 
 
 
1413
  # All methods failed
1414
  return f"Error generating analysis: All methods failed to process the image"
1415
  except Exception as e:
1416
- logger.error(f"Fatal error in process_image_with_text: {str(e)}")
 
1417
  logger.error(traceback.format_exc())
 
 
 
 
 
 
1418
  return f"Error processing image: {str(e)}"
1419
 
1420
  # Function to get log file content
@@ -1461,11 +1638,184 @@ def main():
1461
  "Summarize what you see in this image in one paragraph."
1462
  ]
1463
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1464
  # Create tabs for different modes
1465
  with gr.Blocks(title="InternVL2.5 Image Analyzer", theme=gr.themes.Soft()) as demo:
1466
  gr.Markdown("# InternVL2.5 Image Analyzer")
1467
  gr.Markdown("Analyze images using the InternVL2.5 model. You can upload individual images or analyze all images in a folder.")
1468
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1469
  with gr.Tabs():
1470
  # Tab for single image analysis
1471
  with gr.Tab("Single Image Analysis"):
 
19
  import tempfile
20
  import logging
21
  import traceback
22
+ import io
23
+ import threading
24
+ import queue
25
+ from typing import List, Dict, Any
26
+
27
# In-memory stats for GUI debug display
gui_stats = {
    'errors': 0,                            # total ERROR-level records seen
    'warnings': 0,                          # total WARNING-level records seen
    'last_error': 'None',
    'last_warning': 'None',
    'last_error_time': '',
    'last_warning_time': '',
    'operations_completed': 0,              # calls into process_image_with_text
    'start_time': datetime.datetime.now(),  # process start, for uptime display
    'tensor_issues': 0                      # "list ... unsqueeze" style errors
}

# Function to get stats for UI display
def get_debug_stats():
    """Return a snapshot of the in-memory debug counters for the UI.

    Returns:
        dict: error/warning counts and last messages/timestamps, number of
        completed operations, tensor-issue count, and a human-readable
        uptime string in the form "Hh Mm Ss".
    """
    uptime = datetime.datetime.now() - gui_stats['start_time']
    # Use total_seconds() rather than timedelta.seconds: .seconds wraps at
    # 24 hours and silently drops the .days component, so long-running
    # sessions would show a bogus uptime.
    total_seconds = int(uptime.total_seconds())
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    uptime_str = f"{hours}h {minutes}m {seconds}s"

    return {
        'errors': gui_stats['errors'],
        'warnings': gui_stats['warnings'],
        'last_error': gui_stats['last_error'],
        'last_error_time': gui_stats['last_error_time'],
        'last_warning': gui_stats['last_warning'],
        'last_warning_time': gui_stats['last_warning_time'],
        'operations': gui_stats['operations_completed'],
        'uptime': uptime_str,
        'tensor_issues': gui_stats['tensor_issues']
    }
58
+
59
# Function to format debug stats as HTML
def format_debug_stats_html():
    """Render the current debug statistics as a small HTML status panel."""
    stats = get_debug_stats()

    # Highlight a counter only when it is non-zero; otherwise keep it grey.
    error_color = "#ff5555" if stats['errors'] > 0 else "#555555"
    warning_color = "#ffaa00" if stats['warnings'] > 0 else "#555555"

    panel = f"""
    <div style="margin: 10px 0; padding: 10px; border: 1px solid #ddd; border-radius: 4px; background-color: #f9f9f9;">
        <div style="display: flex; justify-content: space-between;">
            <div style="flex: 1;">
                <p><strong>Errors:</strong> <span style="color: {error_color};">{stats['errors']}</span></p>
                <p><strong>Warnings:</strong> <span style="color: {warning_color};">{stats['warnings']}</span></p>
                <p><strong>Operations:</strong> {stats['operations']}</p>
            </div>
            <div style="flex: 1;">
                <p><strong>Uptime:</strong> {stats['uptime']}</p>
                <p><strong>Tensor Issues:</strong> {stats['tensor_issues']}</p>
            </div>
        </div>
        <div style="margin-top: 10px; border-top: 1px solid #ddd; padding-top: 10px;">
            <p><strong>Last Error:</strong> {stats['last_error_time']} - {stats['last_error']}</p>
            <p><strong>Last Warning:</strong> {stats['last_warning_time']} - {stats['last_warning']}</p>
        </div>
    </div>
    """
    return panel
86
+
87
# Custom logging handler that captures logs for GUI display
class GUILogHandler(logging.Handler):
    """Logging handler that buffers formatted records in memory for the GUI.

    Keeps a bounded list of recent entries (for the "Refresh" view), a queue
    of not-yet-displayed entries (for real-time streaming), and updates the
    shared ``gui_stats`` counters on WARNING/ERROR records.
    """

    def __init__(self, max_entries=100):
        super().__init__()
        self.log_queue = queue.Queue()
        self.max_entries = max_entries
        self.log_entries = []
        # Use a DEDICATED lock for the entry buffer. Do NOT assign to
        # ``self.lock``: logging.Handler already owns ``self.lock`` (an
        # RLock) and Handler.handle() holds it around emit(). Overwriting it
        # with a plain non-reentrant Lock and re-acquiring it inside emit()
        # deadlocks on the very first record.
        self._entries_lock = threading.Lock()

    def emit(self, record):
        """Format the record, update gui_stats, and buffer the entry."""
        try:
            log_entry = self.format(record)

            # Track error and warning counts for the stats panel.
            if record.levelno >= logging.ERROR:
                gui_stats['errors'] += 1
                gui_stats['last_error'] = record.getMessage()
                gui_stats['last_error_time'] = datetime.datetime.now().strftime("%H:%M:%S")

                # Heuristic: "list ... unsqueeze" messages indicate the image
                # tensor was passed as a plain list — count them separately.
                if "list" in record.getMessage() and "unsqueeze" in record.getMessage():
                    gui_stats['tensor_issues'] += 1

            elif record.levelno >= logging.WARNING:
                gui_stats['warnings'] += 1
                gui_stats['last_warning'] = record.getMessage()
                gui_stats['last_warning_time'] = datetime.datetime.now().strftime("%H:%M:%S")

            with self._entries_lock:
                self.log_entries.append(log_entry)
                # Keep only the most recent entries
                if len(self.log_entries) > self.max_entries:
                    self.log_entries = self.log_entries[-self.max_entries:]
                self.log_queue.put(log_entry)
        except Exception:
            self.handleError(record)

    def get_logs(self, last_n=None):
        """Return the buffered entries (optionally only the last *last_n*)
        joined with newlines."""
        with self._entries_lock:
            if last_n is not None:
                return "\n".join(self.log_entries[-last_n:])
            return "\n".join(self.log_entries)

    def get_latest(self):
        """Pop and return the next undisplayed entry, or None if empty."""
        try:
            return self.log_queue.get_nowait()
        except queue.Empty:
            return None

    def clear(self):
        """Discard all buffered entries."""
        with self._entries_lock:
            self.log_entries = []
139
 
140
  # Constants
141
  IMAGENET_MEAN = (0.485, 0.456, 0.406)
 
152
# Set up logging to write to saved_outputs directory
log_file = os.path.join(
    OUTPUT_DIR,
    f"debug_log_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.log",
)

# Create a GUI log handler
gui_log_handler = GUILogHandler(max_entries=500)
gui_log_handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
gui_log_handler.setLevel(logging.DEBUG)

# Configure logging: mirror every record to the log file, stdout,
# and the in-memory GUI buffer.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s [%(levelname)s] %(message)s',
    handlers=[
        logging.FileHandler(log_file),
        logging.StreamHandler(sys.stdout),
        gui_log_handler,
    ],
)
170
 
 
703
 
704
  img = img.convert('RGB')
705
 
706
+
707
  # Get raw analysis from model
708
  result = process_image_with_text(img, prompt)
709
  results.append(result)
 
1340
  # Function to process an image with text prompt
1341
  def process_image_with_text(image, prompt):
1342
  """Process a single image with the InternVL model and a text prompt."""
1343
+ start_time = time.time()
1344
+
1345
+ # Increment operations counter
1346
+ gui_stats['operations_completed'] += 1
1347
+
1348
  try:
1349
  logger.info(f"process_image_with_text called with image type: {type(image)}")
1350
 
 
1428
 
1429
  logger.info(f"Final tensor prepared: shape={tensor.shape}, device={tensor.device}, dtype={tensor.dtype}")
1430
  except Exception as tensor_err:
1431
+ error_msg = f"Error in tensor creation: {str(tensor_err)}"
1432
+ logger.error(error_msg)
1433
  logger.error(traceback.format_exc())
1434
+ # Update in-memory error statistics
1435
+ gui_stats['errors'] += 1
1436
+ gui_stats['last_error'] = error_msg
1437
+ gui_stats['last_error_time'] = datetime.datetime.now().strftime("%H:%M:%S")
1438
  return f"Error preparing image for analysis: {str(tensor_err)}"
1439
 
1440
  # Process the prompt
 
1466
  output = tokenizer.decode(output_ids[0], skip_special_tokens=True)
1467
  logger.debug(f"Decoded output length: {len(output)} chars")
1468
 
1469
+ # Log completion time
1470
+ elapsed = time.time() - start_time
1471
+ logger.info(f"Image processing completed in {elapsed:.2f} seconds")
1472
+
1473
  return output.strip()
1474
  except Exception as gen_error:
1475
+ error_msg = f"Direct generation failed: {str(gen_error)}"
1476
+ logger.error(error_msg)
1477
  logger.error(traceback.format_exc())
1478
+ # Update in-memory error statistics
1479
+ gui_stats['errors'] += 1
1480
+ gui_stats['last_error'] = error_msg
1481
+ gui_stats['last_error_time'] = datetime.datetime.now().strftime("%H:%M:%S")
1482
 
1483
  # Approach 2: Try the chat method
1484
  try:
 
1503
  logger.info("Chat method successful")
1504
  logger.debug(f"Chat response length: {len(response)} chars")
1505
 
1506
+ # Log completion time
1507
+ elapsed = time.time() - start_time
1508
+ logger.info(f"Image processing (fallback chat) completed in {elapsed:.2f} seconds")
1509
+
1510
  return response.strip()
1511
  except Exception as chat_error:
1512
+ error_msg = f"Chat method failed: {str(chat_error)}"
1513
+ logger.error(error_msg)
1514
  logger.error(traceback.format_exc())
1515
+ # Update in-memory error statistics
1516
+ gui_stats['errors'] += 1
1517
+ gui_stats['last_error'] = error_msg
1518
+ gui_stats['last_error_time'] = datetime.datetime.now().strftime("%H:%M:%S")
1519
 
1520
  # Approach 3: Try direct model forward pass
1521
  try:
 
1551
  response = tokenizer.decode(pred_ids[0], skip_special_tokens=True)
1552
  logger.debug(f"Decoded response length: {len(response)} chars")
1553
 
1554
+ # Log completion time
1555
+ elapsed = time.time() - start_time
1556
+ logger.info(f"Image processing (fallback forward) completed in {elapsed:.2f} seconds")
1557
+
1558
  return response.strip()
1559
  else:
1560
+ error_msg = "Model output does not contain logits"
1561
+ logger.error(error_msg)
1562
+ gui_stats['errors'] += 1
1563
+ gui_stats['last_error'] = error_msg
1564
+ gui_stats['last_error_time'] = datetime.datetime.now().strftime("%H:%M:%S")
1565
  return "Failed to analyze image - model output contains no usable data"
1566
  else:
1567
+ error_msg = "Model does not have forward method"
1568
+ logger.error(error_msg)
1569
+ gui_stats['errors'] += 1
1570
+ gui_stats['last_error'] = error_msg
1571
+ gui_stats['last_error_time'] = datetime.datetime.now().strftime("%H:%M:%S")
1572
  return "Failed to analyze image - model doesn't support direct calling"
1573
  except Exception as forward_error:
1574
+ error_msg = f"Forward method failed: {str(forward_error)}"
1575
+ logger.error(error_msg)
1576
  logger.error(traceback.format_exc())
1577
 
1578
+ # Update in-memory error statistics
1579
+ gui_stats['errors'] += 1
1580
+ gui_stats['last_error'] = error_msg
1581
+ gui_stats['last_error_time'] = datetime.datetime.now().strftime("%H:%M:%S")
1582
+
1583
  # All methods failed
1584
  return f"Error generating analysis: All methods failed to process the image"
1585
  except Exception as e:
1586
+ error_msg = f"Fatal error in process_image_with_text: {str(e)}"
1587
+ logger.error(error_msg)
1588
  logger.error(traceback.format_exc())
1589
+
1590
+ # Update in-memory error statistics
1591
+ gui_stats['errors'] += 1
1592
+ gui_stats['last_error'] = error_msg
1593
+ gui_stats['last_error_time'] = datetime.datetime.now().strftime("%H:%M:%S")
1594
+
1595
  return f"Error processing image: {str(e)}"
1596
 
1597
  # Function to get log file content
 
1638
  "Summarize what you see in this image in one paragraph."
1639
  ]
1640
 
1641
# Function to get the most recent debug logs
def get_debug_logs(num_lines=50):
    """Return the tail of the buffered GUI log entries as one string."""
    return gui_log_handler.get_logs(last_n=num_lines)
1644
+
1645
+ # Function to update logs in real-time
1646
+ def update_logs(history):
1647
+ latest = gui_log_handler.get_latest()
1648
+ if latest:
1649
+ history = history + "\n" + latest if history else latest
1650
+ # Keep only the last 50 lines for performance
1651
+ lines = history.split("\n")
1652
+ if len(lines) > 50:
1653
+ history = "\n".join(lines[-50:])
1654
+ return history
1655
+
1656
+ # Function to clear logs
1657
+ def clear_logs():
1658
+ gui_log_handler.clear()
1659
+ return ""
1660
+
1661
  # Create tabs for different modes
1662
  with gr.Blocks(title="InternVL2.5 Image Analyzer", theme=gr.themes.Soft()) as demo:
1663
  gr.Markdown("# InternVL2.5 Image Analyzer")
1664
  gr.Markdown("Analyze images using the InternVL2.5 model. You can upload individual images or analyze all images in a folder.")
1665
 
1666
+ # Debug mode toggle and panel
1667
+ with gr.Accordion("Debug Console", open=False) as debug_accordion:
1668
+ with gr.Row():
1669
+ with gr.Column(scale=4):
1670
+ debug_output = gr.Textbox(
1671
+ label="Real-time Debug Logs",
1672
+ value=get_debug_logs(20),
1673
+ lines=8,
1674
+ max_lines=15,
1675
+ autoscroll=True,
1676
+ elem_id="debug_output"
1677
+ )
1678
+ with gr.Column(scale=1):
1679
+ with gr.Row():
1680
+ clear_btn = gr.Button("Clear Logs")
1681
+ refresh_btn = gr.Button("Refresh")
1682
+
1683
+ debug_level = gr.Radio(
1684
+ ["ERROR", "WARNING", "INFO", "DEBUG"],
1685
+ label="Debug Level",
1686
+ value="INFO"
1687
+ )
1688
+
1689
+ # Track error counts
1690
+ error_count = gr.Number(value=0, label="Errors", precision=0)
1691
+ warning_count = gr.Number(value=0, label="Warnings", precision=0)
1692
+
1693
+ # Stats display
1694
+ debug_stats_html = gr.HTML(format_debug_stats_html())
1695
+
1696
+ # Add option to enable GUI logging for all operations
1697
+ enable_full_logging = gr.Checkbox(label="Log All Operations to Console", value=False)
1698
+
1699
+ # Function to update stats display
1700
+ def update_stats_display():
1701
+ return format_debug_stats_html()
1702
+
1703
+ # Set up a timer to update stats every few seconds
1704
+ gr.on(
1705
+ triggers=[debug_accordion.open],
1706
+ fn=update_stats_display,
1707
+ outputs=[debug_stats_html],
1708
+ every=5 # Update every 5 seconds when accordion is open
1709
+ )
1710
+
1711
+ # Update counts periodically
1712
+ def update_error_counts():
1713
+ return gui_stats['errors'], gui_stats['warnings']
1714
+
1715
+ gr.on(
1716
+ triggers=[debug_accordion.open],
1717
+ fn=update_error_counts,
1718
+ outputs=[error_count, warning_count],
1719
+ every=2
1720
+ )
1721
+
1722
+ # Debug info about model
1723
+ with gr.Accordion("Model Information", open=False):
1724
+ if torch.cuda.is_available():
1725
+ gpu_info = f"CUDA available: {torch.cuda.device_count()} GPU(s)\n"
1726
+ for i in range(torch.cuda.device_count()):
1727
+ gpu_info += f"- GPU {i}: {torch.cuda.get_device_name(i)}\n"
1728
+ gpu_info += f"Total memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.2f} GB"
1729
+ else:
1730
+ gpu_info = "CUDA not available - using CPU"
1731
+
1732
+ gr.Textbox(value=gpu_info, label="GPU Information", lines=4)
1733
+
1734
+ model_info = f"Model: {MODEL_NAME}\nImage size: {IMAGE_SIZE}x{IMAGE_SIZE}"
1735
+ gr.Textbox(value=model_info, label="Model Configuration", lines=2)
1736
+
1737
+ # Function to get current memory usage
1738
+ def get_memory_usage():
1739
+ if torch.cuda.is_available():
1740
+ allocated = torch.cuda.memory_allocated() / 1e9 # GB
1741
+ reserved = torch.cuda.memory_reserved() / 1e9 # GB
1742
+ max_memory = torch.cuda.max_memory_allocated() / 1e9 # GB
1743
+ return f"Allocated: {allocated:.2f} GB\nReserved: {reserved:.2f} GB\nMax used: {max_memory:.2f} GB"
1744
+ return "No GPU available"
1745
+
1746
+ memory_usage = gr.Textbox(
1747
+ value=get_memory_usage(),
1748
+ label="Current GPU Memory Usage",
1749
+ lines=3
1750
+ )
1751
+
1752
+ # Refresh memory usage
1753
+ refresh_memory_btn = gr.Button("Refresh Memory Info")
1754
+ refresh_memory_btn.click(
1755
+ fn=get_memory_usage,
1756
+ inputs=[],
1757
+ outputs=[memory_usage]
1758
+ )
1759
+
1760
+ # Download debug logs button
1761
+ gr.Markdown("### Download Complete Debug Logs")
1762
+
1763
+ def get_log_file_path():
1764
+ return log_file if os.path.exists(log_file) else None
1765
+
1766
+ download_log_btn = gr.Button("Download Full Log File")
1767
+ log_file_output = gr.File(label="Log File for Download")
1768
+
1769
+ download_log_btn.click(
1770
+ fn=get_log_file_path,
1771
+ inputs=[],
1772
+ outputs=[log_file_output]
1773
+ )
1774
+
1775
+ # Set up log level change handler
1776
+ def change_log_level(level):
1777
+ if level == "ERROR":
1778
+ gui_log_handler.setLevel(logging.ERROR)
1779
+ logger.info(f"Debug display log level set to ERROR")
1780
+ elif level == "WARNING":
1781
+ gui_log_handler.setLevel(logging.WARNING)
1782
+ logger.info(f"Debug display log level set to WARNING")
1783
+ elif level == "INFO":
1784
+ gui_log_handler.setLevel(logging.INFO)
1785
+ logger.info(f"Debug display log level set to INFO")
1786
+ else: # DEBUG
1787
+ gui_log_handler.setLevel(logging.DEBUG)
1788
+ logger.info(f"Debug display log level set to DEBUG")
1789
+ return f"Log level set to {level}"
1790
+
1791
+ debug_level.change(
1792
+ fn=change_log_level,
1793
+ inputs=[debug_level],
1794
+ outputs=[]
1795
+ )
1796
+
1797
+ # Button handlers
1798
+ clear_btn.click(
1799
+ fn=clear_logs,
1800
+ inputs=[],
1801
+ outputs=[debug_output]
1802
+ )
1803
+
1804
+ refresh_btn.click(
1805
+ fn=get_debug_logs,
1806
+ inputs=[],
1807
+ outputs=[debug_output]
1808
+ )
1809
+
1810
+ # Set up automatic refresh of debug logs
1811
+ debug_output.change(
1812
+ fn=update_logs,
1813
+ inputs=[debug_output],
1814
+ outputs=[debug_output],
1815
+ every=1 # Update every second
1816
+ )
1817
+
1818
+ # Main tabs for functionality
1819
  with gr.Tabs():
1820
  # Tab for single image analysis
1821
  with gr.Tab("Single Image Analysis"):