Update app.py
Browse files
app.py
CHANGED
|
@@ -15,365 +15,916 @@ import io
|
|
| 15 |
import base64
|
| 16 |
import requests
|
| 17 |
import warnings
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
|
| 19 |
# Suppress warnings
|
| 20 |
warnings.filterwarnings("ignore")
|
| 21 |
|
| 22 |
-
class
|
| 23 |
def __init__(self):
|
| 24 |
-
|
| 25 |
self.load_models()
|
|
|
|
| 26 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
def load_models(self):
|
| 28 |
-
"""Load all required models"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
try:
|
| 30 |
print("Loading BLIP model...")
|
| 31 |
-
# BLIP for image captioning and understanding
|
| 32 |
self.blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
|
| 33 |
self.blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
|
|
|
|
| 34 |
|
| 35 |
print("Loading TrOCR model...")
|
| 36 |
-
# TrOCR for text extraction
|
| 37 |
self.trocr_processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed")
|
| 38 |
self.trocr_model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-printed")
|
|
|
|
| 39 |
|
| 40 |
print("Loading EasyOCR...")
|
| 41 |
-
|
| 42 |
-
self.
|
| 43 |
|
| 44 |
-
# Florence-2 for advanced understanding
|
| 45 |
try:
|
| 46 |
print("Attempting to load Florence-2...")
|
| 47 |
self.florence_processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
|
| 48 |
self.florence_model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
|
| 49 |
-
self.
|
| 50 |
print("Florence-2 loaded successfully!")
|
| 51 |
except Exception as e:
|
| 52 |
print(f"Florence-2 not available: {e}")
|
| 53 |
-
self.
|
| 54 |
|
| 55 |
-
print("
|
| 56 |
|
| 57 |
except Exception as e:
|
| 58 |
print(f"Error loading models: {e}")
|
| 59 |
raise e
|
| 60 |
|
| 61 |
-
def
|
| 62 |
-
"""
|
| 63 |
-
|
| 64 |
-
return "Please upload an image first."
|
| 65 |
|
| 66 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
|
| 68 |
try:
|
| 69 |
# Convert to PIL Image if needed
|
| 70 |
if not isinstance(image, Image.Image):
|
| 71 |
image = Image.fromarray(image).convert('RGB')
|
| 72 |
|
| 73 |
-
#
|
| 74 |
-
|
|
|
|
|
|
|
|
|
|
| 75 |
|
| 76 |
-
#
|
| 77 |
-
|
| 78 |
|
| 79 |
-
#
|
| 80 |
-
|
| 81 |
|
| 82 |
-
#
|
| 83 |
-
|
| 84 |
-
results['data_points'] = self.extract_data_points(image, results['chart_type'])
|
| 85 |
|
| 86 |
-
# Advanced analysis with Florence-2
|
| 87 |
-
if self.
|
| 88 |
-
|
| 89 |
|
| 90 |
-
return
|
| 91 |
|
| 92 |
except Exception as e:
|
| 93 |
-
|
|
|
|
| 94 |
|
| 95 |
-
def
|
| 96 |
-
"""
|
| 97 |
try:
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 104 |
|
| 105 |
-
def
|
| 106 |
-
"""
|
| 107 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 108 |
|
| 109 |
-
#
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 117 |
|
| 118 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 119 |
try:
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
'
|
| 136 |
-
'
|
| 137 |
-
'
|
| 138 |
-
'
|
| 139 |
-
'
|
| 140 |
-
'
|
| 141 |
-
'
|
| 142 |
}
|
| 143 |
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 147 |
|
| 148 |
return "Unknown Chart Type"
|
| 149 |
|
| 150 |
-
def
|
| 151 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 152 |
try:
|
| 153 |
-
# This is a simplified version - real implementation would be more sophisticated
|
| 154 |
-
# Convert to grayscale for analysis
|
| 155 |
image_np = np.array(image.convert('L'))
|
| 156 |
|
| 157 |
-
#
|
| 158 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 159 |
|
| 160 |
-
#
|
| 161 |
-
|
| 162 |
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 167 |
}
|
| 168 |
|
| 169 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 170 |
|
| 171 |
except Exception as e:
|
| 172 |
-
|
|
|
|
|
|
|
| 173 |
|
| 174 |
-
def
|
| 175 |
-
"""Advanced analysis using Florence-2"""
|
| 176 |
-
if not self.
|
| 177 |
-
return "Florence-2 model not available"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 178 |
|
| 179 |
try:
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
num_beams=3
|
| 195 |
-
)
|
| 196 |
-
generated_text = self.florence_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
|
| 197 |
-
results[prompt] = generated_text
|
| 198 |
|
| 199 |
-
return
|
| 200 |
-
|
| 201 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 202 |
|
| 203 |
-
def
|
| 204 |
-
"""Format results for display"""
|
| 205 |
-
formatted = "# Chart Analysis Results\n\n"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 206 |
|
| 207 |
-
|
| 208 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 209 |
|
| 210 |
-
|
| 211 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 212 |
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 217 |
|
| 218 |
-
|
| 219 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 220 |
|
| 221 |
-
|
| 222 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 223 |
|
| 224 |
return formatted
|
| 225 |
|
| 226 |
-
# Initialize the analyzer
|
| 227 |
-
analyzer =
|
| 228 |
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 232 |
|
| 233 |
-
#
|
| 234 |
-
with gr.Blocks(title="Chart Analyzer
|
| 235 |
-
gr.Markdown("# π Chart Analyzer
|
| 236 |
-
gr.Markdown("Upload a chart image
|
| 237 |
|
| 238 |
with gr.Row():
|
| 239 |
with gr.Column(scale=1):
|
| 240 |
-
gr.Markdown("##
|
| 241 |
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
label="Upload Chart Image",
|
| 248 |
-
height=400,
|
| 249 |
-
sources=["upload", "webcam", "clipboard"],
|
| 250 |
-
format="png"
|
| 251 |
-
)
|
| 252 |
-
gr.Markdown("**Supported formats:** PNG, JPG, JPEG, GIF, BMP")
|
| 253 |
-
gr.Markdown("**Max size:** 10MB")
|
| 254 |
-
|
| 255 |
-
with gr.Tab("π From URL"):
|
| 256 |
-
url_input = gr.Textbox(
|
| 257 |
-
label="Image URL",
|
| 258 |
-
placeholder="https://example.com/chart.png"
|
| 259 |
-
)
|
| 260 |
-
load_url_btn = gr.Button("Load from URL")
|
| 261 |
|
| 262 |
-
# Analysis options
|
| 263 |
-
gr.Markdown("## βοΈ Analysis Settings")
|
| 264 |
analysis_type = gr.Dropdown(
|
| 265 |
-
choices=
|
| 266 |
value="comprehensive",
|
| 267 |
label="Analysis Type",
|
| 268 |
-
info="Choose
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 269 |
)
|
| 270 |
|
| 271 |
-
with gr.Accordion("
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 272 |
confidence_threshold = gr.Slider(
|
| 273 |
minimum=0.1,
|
| 274 |
maximum=1.0,
|
| 275 |
value=0.5,
|
| 276 |
label="OCR Confidence Threshold"
|
| 277 |
)
|
| 278 |
-
use_florence = gr.Checkbox(
|
| 279 |
-
label="Use Florence-2 (Advanced Analysis)",
|
| 280 |
-
value=True
|
| 281 |
-
)
|
| 282 |
|
| 283 |
analyze_btn = gr.Button("π Analyze Chart", variant="primary", size="lg")
|
| 284 |
clear_btn = gr.Button("ποΈ Clear All", variant="secondary")
|
| 285 |
|
| 286 |
with gr.Column(scale=2):
|
| 287 |
-
gr.Markdown("##
|
| 288 |
-
output = gr.Markdown(
|
| 289 |
-
value="Upload an image and click 'Analyze Chart' to see results here.",
|
| 290 |
-
label="Results"
|
| 291 |
-
)
|
| 292 |
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 297 |
|
| 298 |
-
#
|
| 299 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
| 300 |
try:
|
| 301 |
-
|
| 302 |
-
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 306 |
except Exception as e:
|
| 307 |
-
|
|
|
|
| 308 |
|
| 309 |
-
|
| 310 |
-
|
| 311 |
-
if
|
| 312 |
-
return
|
| 313 |
|
| 314 |
try:
|
| 315 |
-
|
|
|
|
|
|
|
| 316 |
|
| 317 |
-
#
|
| 318 |
-
|
| 319 |
-
|
| 320 |
-
"confidence_threshold": confidence_threshold,
|
| 321 |
-
"models_used": ["BLIP", "TrOCR", "EasyOCR"],
|
| 322 |
-
"timestamp": pd.Timestamp.now().isoformat()
|
| 323 |
-
}
|
| 324 |
|
| 325 |
-
if
|
| 326 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 327 |
|
| 328 |
-
return
|
| 329 |
|
| 330 |
except Exception as e:
|
| 331 |
-
|
| 332 |
-
return error_msg, {"error": error_msg}, None
|
| 333 |
-
|
| 334 |
-
# Clear function
|
| 335 |
-
def clear_all():
|
| 336 |
-
return None, "Upload an image and click 'Analyze Chart' to see results here.", {}, None
|
| 337 |
-
|
| 338 |
-
# Examples
|
| 339 |
-
# gr.Examples(
|
| 340 |
-
# examples=[
|
| 341 |
-
# ["https://via.placeholder.com/600x400/0066CC/FFFFFF?text=Sample+Bar+Chart", "comprehensive"],
|
| 342 |
-
# ["https://via.placeholder.com/600x400/FF6B35/FFFFFF?text=Sample+Line+Chart", "data_extraction"],
|
| 343 |
-
# ],
|
| 344 |
-
# inputs=[image_input, analysis_type],
|
| 345 |
-
# label="Try these examples:"
|
| 346 |
-
# )
|
| 347 |
|
| 348 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 349 |
analyze_btn.click(
|
| 350 |
-
fn=
|
| 351 |
-
inputs=[image_input, analysis_type,
|
| 352 |
-
outputs=[
|
| 353 |
)
|
| 354 |
|
| 355 |
-
|
| 356 |
-
fn=
|
| 357 |
-
inputs=[
|
| 358 |
-
outputs=[
|
| 359 |
)
|
| 360 |
|
| 361 |
clear_btn.click(
|
| 362 |
-
fn=
|
| 363 |
-
outputs=[image_input,
|
| 364 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 365 |
|
| 366 |
if __name__ == "__main__":
|
| 367 |
-
print("Starting Chart Analyzer...")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 368 |
try:
|
| 369 |
demo.launch(
|
| 370 |
server_name="0.0.0.0",
|
| 371 |
server_port=7860,
|
| 372 |
share=False,
|
| 373 |
show_error=True,
|
| 374 |
-
|
| 375 |
)
|
| 376 |
except Exception as e:
|
| 377 |
-
print(f"Error launching app: {e}")
|
| 378 |
-
|
| 379 |
demo.launch()
|
|
|
|
| 15 |
import base64
|
| 16 |
import requests
|
| 17 |
import warnings
|
| 18 |
+
import json
|
| 19 |
+
from datetime import datetime
|
| 20 |
+
from typing import Dict, List, Any, Optional
|
| 21 |
+
import re
|
| 22 |
|
| 23 |
# Suppress warnings
|
| 24 |
warnings.filterwarnings("ignore")
|
| 25 |
|
| 26 |
+
class StructuredChartAnalyzer:
    """Chart-image analyzer combining several vision models (BLIP captioning,
    TrOCR, EasyOCR, optionally Florence-2) and emitting a structured,
    JSON-friendly analysis dict."""

    def __init__(self):
        """Initialize the enhanced chart analyzer with structured output capabilities"""
        # Model loading is expensive (downloads weights on first run) and may raise.
        self.load_models()
        # Named prompt templates keyed by analysis type; see _init_prompt_templates.
        self.prompt_templates = self._init_prompt_templates()
|
| 31 |
|
| 32 |
+
def _init_prompt_templates(self) -> Dict[str, str]:
|
| 33 |
+
"""Initialize predefined prompt templates for different analysis types"""
|
| 34 |
+
return {
|
| 35 |
+
"comprehensive": "Analyze this chart comprehensively. Identify the chart type, extract all visible text including titles, labels, legends, and data values. Describe the data trends, patterns, and key insights.",
|
| 36 |
+
|
| 37 |
+
"data_extraction": "Focus on extracting numerical data from this chart. Identify all data points, values, categories, and measurements. Pay special attention to axis labels, data series, and quantitative information.",
|
| 38 |
+
|
| 39 |
+
"visual_elements": "Describe the visual elements of this chart including colors, chart type, layout, axes, legends, and overall design. Focus on the structural components.",
|
| 40 |
+
|
| 41 |
+
"trend_analysis": "Analyze the trends and patterns shown in this chart. Identify increasing/decreasing trends, correlations, outliers, and significant data patterns. Provide insights about what the data reveals.",
|
| 42 |
+
|
| 43 |
+
"accessibility": "Describe this chart in a way that would be helpful for visually impaired users. Include all textual content, data relationships, and key findings in a clear, structured manner.",
|
| 44 |
+
|
| 45 |
+
"business_insights": "Analyze this chart from a business perspective. What are the key performance indicators, trends, and actionable insights that can be derived from this data?"
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
def load_models(self):
    """Load all required models with better error handling"""
    # Per-backend availability flags; downstream code checks these instead of
    # assuming every model loaded.
    self.models_loaded = {
        'blip': False,
        'trocr': False,
        'easyocr': False,
        'florence': False
    }

    try:
        print("Loading BLIP model...")
        # BLIP: image captioning, used for high-level chart description.
        self.blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
        self.blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
        self.models_loaded['blip'] = True

        print("Loading TrOCR model...")
        # TrOCR: transformer OCR model tuned for printed text.
        self.trocr_processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed")
        self.trocr_model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-printed")
        self.models_loaded['trocr'] = True

        print("Loading EasyOCR...")
        # CPU-only EasyOCR reader for English text.
        self.ocr_reader = easyocr.Reader(['en'], gpu=False)
        self.models_loaded['easyocr'] = True

        # Florence-2 for advanced understanding
        # Optional backend: failure here is tolerated and only clears the flag.
        try:
            print("Attempting to load Florence-2...")
            self.florence_processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
            self.florence_model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
            self.models_loaded['florence'] = True
            print("Florence-2 loaded successfully!")
        except Exception as e:
            print(f"Florence-2 not available: {e}")
            self.models_loaded['florence'] = False

        print("Model loading completed!")

    except Exception as e:
        # The non-Florence models are required - re-raise so startup fails loudly.
        print(f"Error loading models: {e}")
        raise e
|
| 88 |
|
| 89 |
+
def analyze_chart_with_prompt(self, image, custom_prompt: Optional[str] = None, analysis_type: str = "comprehensive") -> Dict[str, Any]:
    """
    Main function to analyze charts with structured JSON output

    Args:
        image: PIL Image or numpy array
        custom_prompt: Custom analysis prompt
        analysis_type: Type of analysis to perform

    Returns:
        Structured dictionary with analysis results
    """
    # Initialize structured output
    # The skeleton is always returned, even on failure, so callers can rely
    # on every top-level key existing; problems accumulate under "errors".
    structured_output = {
        "metadata": {
            "timestamp": datetime.now().isoformat(),
            "analysis_type": analysis_type,
            "models_used": [model for model, loaded in self.models_loaded.items() if loaded],
            # A caller-supplied prompt wins; otherwise fall back to the named
            # template, defaulting to "comprehensive" for unknown types.
            "prompt_used": custom_prompt or self.prompt_templates.get(analysis_type, self.prompt_templates["comprehensive"])
        },
        "image_info": {},
        "text_extraction": {},
        "chart_analysis": {},
        "data_insights": {},
        "quality_metrics": {},
        "errors": []
    }

    if image is None:
        structured_output["errors"].append("No image provided")
        return structured_output

    try:
        # Convert to PIL Image if needed
        if not isinstance(image, Image.Image):
            image = Image.fromarray(image).convert('RGB')

        # Extract image metadata
        structured_output["image_info"] = self._extract_image_info(image)

        # Text extraction with multiple methods
        structured_output["text_extraction"] = self._extract_text_comprehensive(image)

        # Chart type and structure analysis
        structured_output["chart_analysis"] = self._analyze_chart_structure(image, structured_output["text_extraction"])

        # Data insights extraction
        structured_output["data_insights"] = self._extract_data_insights(image, structured_output)

        # Quality assessment
        structured_output["quality_metrics"] = self._assess_quality(image, structured_output)

        # Advanced analysis with Florence-2 if available and requested
        if self.models_loaded['florence'] and analysis_type in ["comprehensive", "advanced"]:
            structured_output["advanced_analysis"] = self._florence_advanced_analysis(image, custom_prompt)

        return structured_output

    except Exception as e:
        # Never raise to the UI layer: record the failure and return what we have.
        structured_output["errors"].append(f"Analysis error: {str(e)}")
        return structured_output
|
| 150 |
|
| 151 |
+
def _extract_image_info(self, image: Image.Image) -> Dict[str, Any]:
|
| 152 |
+
"""Extract basic image information"""
|
| 153 |
try:
|
| 154 |
+
return {
|
| 155 |
+
"dimensions": {
|
| 156 |
+
"width": image.size[0],
|
| 157 |
+
"height": image.size[1]
|
| 158 |
+
},
|
| 159 |
+
"format": image.format or "Unknown",
|
| 160 |
+
"mode": image.mode,
|
| 161 |
+
"has_transparency": image.mode in ("RGBA", "LA"),
|
| 162 |
+
"aspect_ratio": round(image.size[0] / image.size[1], 2)
|
| 163 |
+
}
|
| 164 |
+
except Exception as e:
|
| 165 |
+
return {"error": str(e)}
|
| 166 |
|
| 167 |
+
def _extract_text_comprehensive(self, image: Image.Image) -> Dict[str, Any]:
    """Comprehensive text extraction with multiple methods"""
    # Per-engine results are kept separately; an engine failure is stored
    # in-band as an "Error: ..." string rather than raised.
    text_results = {
        "methods_used": [],
        "extracted_texts": {},
        "confidence_scores": {},
        "combined_text": "",
        "detected_numbers": [],
        "detected_labels": []
    }

    # TrOCR extraction
    if self.models_loaded['trocr']:
        try:
            pixel_values = self.trocr_processor(image, return_tensors="pt").pixel_values
            generated_ids = self.trocr_model.generate(pixel_values, max_length=200)
            trocr_text = self.trocr_processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
            text_results["extracted_texts"]["trocr"] = trocr_text
            text_results["methods_used"].append("TrOCR")
        except Exception as e:
            text_results["extracted_texts"]["trocr"] = f"Error: {str(e)}"

    # EasyOCR extraction
    if self.models_loaded['easyocr']:
        try:
            image_np = np.array(image)
            ocr_results = self.ocr_reader.readtext(image_np)

            easyocr_data = []
            # readtext yields (bounding box, text, confidence) triples.
            for bbox, text, confidence in ocr_results:
                easyocr_data.append({
                    "text": text,
                    "confidence": float(confidence),
                    "bbox": bbox
                })

            easyocr_text = ' '.join([result["text"] for result in easyocr_data])
            text_results["extracted_texts"]["easyocr"] = easyocr_text
            text_results["confidence_scores"]["easyocr"] = easyocr_data
            text_results["methods_used"].append("EasyOCR")
        except Exception as e:
            text_results["extracted_texts"]["easyocr"] = f"Error: {str(e)}"

    # Combine and analyze text
    # In-band "Error: ..." markers are excluded from the combined text.
    all_texts = [text for text in text_results["extracted_texts"].values() if not text.startswith("Error:")]
    text_results["combined_text"] = " ".join(all_texts)

    # Extract numbers and potential labels
    text_results["detected_numbers"] = self._extract_numbers(text_results["combined_text"])
    text_results["detected_labels"] = self._extract_potential_labels(text_results["combined_text"])

    return text_results
|
| 219 |
+
|
| 220 |
+
def _extract_numbers(self, text: str) -> List[Dict[str, Any]]:
|
| 221 |
+
"""Extract numbers from text with context"""
|
| 222 |
+
number_patterns = [
|
| 223 |
+
r'\d+\.?\d*%', # Percentages
|
| 224 |
+
r'\$\d+\.?\d*', # Currency
|
| 225 |
+
r'\d{1,3}(?:,\d{3})*\.?\d*', # Numbers with commas
|
| 226 |
+
r'\d+\.?\d*' # Simple numbers
|
| 227 |
+
]
|
| 228 |
+
|
| 229 |
+
numbers = []
|
| 230 |
+
for pattern in number_patterns:
|
| 231 |
+
matches = re.finditer(pattern, text)
|
| 232 |
+
for match in matches:
|
| 233 |
+
numbers.append({
|
| 234 |
+
"value": match.group(),
|
| 235 |
+
"position": match.span(),
|
| 236 |
+
"type": "percentage" if "%" in match.group() else
|
| 237 |
+
"currency" if "$" in match.group() else "number"
|
| 238 |
+
})
|
| 239 |
+
|
| 240 |
+
return numbers
|
| 241 |
+
|
| 242 |
+
def _extract_potential_labels(self, text: str) -> List[str]:
|
| 243 |
+
"""Extract potential chart labels and categories"""
|
| 244 |
+
# Simple heuristic to find potential labels
|
| 245 |
+
words = text.split()
|
| 246 |
+
potential_labels = []
|
| 247 |
+
|
| 248 |
+
for word in words:
|
| 249 |
+
# Skip pure numbers
|
| 250 |
+
if re.match(r'^\d+\.?\d*$', word):
|
| 251 |
+
continue
|
| 252 |
+
# Skip very short words
|
| 253 |
+
if len(word) < 2:
|
| 254 |
+
continue
|
| 255 |
+
# Add words that might be labels
|
| 256 |
+
if word.istitle() or word.isupper():
|
| 257 |
+
potential_labels.append(word)
|
| 258 |
+
|
| 259 |
+
return list(set(potential_labels))
|
| 260 |
+
|
| 261 |
+
def _analyze_chart_structure(self, image: Image.Image, text_data: Dict) -> Dict[str, Any]:
    """Analyze chart structure and type"""
    # "confidence" is initialized but not currently updated by this method.
    analysis = {
        "chart_type": "unknown",
        "confidence": 0.0,
        "visual_elements": {},
        "layout_analysis": {}
    }

    # Get image description from BLIP
    if self.models_loaded['blip']:
        try:
            inputs = self.blip_processor(image, return_tensors="pt")
            out = self.blip_model.generate(**inputs, max_length=150)
            description = self.blip_processor.decode(out[0], skip_special_tokens=True)
            analysis["description"] = description

            # Chart type detection based on description and text
            analysis["chart_type"] = self._detect_chart_type_advanced(description, text_data["combined_text"])

        except Exception as e:
            # Keep the failure in-band; the visual analysis below still runs.
            analysis["description"] = f"Error: {str(e)}"

    # Visual analysis
    try:
        analysis["visual_elements"] = self._analyze_visual_elements(image)
        analysis["layout_analysis"] = self._analyze_layout(image)
    except Exception as e:
        analysis["visual_elements"] = {"error": str(e)}

    return analysis
|
| 292 |
+
|
| 293 |
+
def _detect_chart_type_advanced(self, description: str, text: str) -> str:
|
| 294 |
+
"""Advanced chart type detection with confidence scoring"""
|
| 295 |
+
combined_text = (description + " " + text).lower()
|
| 296 |
+
|
| 297 |
+
chart_indicators = {
|
| 298 |
+
'bar_chart': ['bar', 'column', 'histogram', 'vertical bars', 'horizontal bars'],
|
| 299 |
+
'line_chart': ['line', 'trend', 'time series', 'curve', 'linear'],
|
| 300 |
+
'pie_chart': ['pie', 'circular', 'slice', 'wedge', 'donut'],
|
| 301 |
+
'scatter_plot': ['scatter', 'correlation', 'points', 'dots', 'plot'],
|
| 302 |
+
'area_chart': ['area', 'filled', 'stacked area'],
|
| 303 |
+
'box_plot': ['box', 'whisker', 'quartile', 'median'],
|
| 304 |
+
'heatmap': ['heat', 'color coded', 'matrix', 'intensity'],
|
| 305 |
+
'gauge': ['gauge', 'dial', 'speedometer', 'meter'],
|
| 306 |
+
'funnel': ['funnel', 'conversion', 'stages'],
|
| 307 |
+
'radar': ['radar', 'spider', 'web chart']
|
| 308 |
}
|
| 309 |
|
| 310 |
+
scores = {}
|
| 311 |
+
for chart_type, keywords in chart_indicators.items():
|
| 312 |
+
score = sum(1 for keyword in keywords if keyword in combined_text)
|
| 313 |
+
if score > 0:
|
| 314 |
+
scores[chart_type] = score
|
| 315 |
+
|
| 316 |
+
if scores:
|
| 317 |
+
best_match = max(scores.items(), key=lambda x: x[1])
|
| 318 |
+
return best_match[0].replace('_', ' ').title()
|
| 319 |
|
| 320 |
return "Unknown Chart Type"
|
| 321 |
|
| 322 |
+
def _analyze_visual_elements(self, image: Image.Image) -> Dict[str, Any]:
    """Analyze visual elements of the chart"""
    try:
        image_np = np.array(image)

        # Color analysis
        # Flatten to an (N, 3) pixel list. NOTE(review): assumes a 3-channel
        # RGB input; RGBA or grayscale would make reshape fail and fall
        # through to the error dict below - confirm callers always pass RGB.
        colors = image_np.reshape(-1, 3)
        unique_colors = np.unique(colors, axis=0)
        dominant_colors = self._get_dominant_colors(colors)

        # Edge analysis
        gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
        edges = cv2.Canny(gray, 50, 150)

        return {
            "color_count": len(unique_colors),
            "dominant_colors": dominant_colors,
            # Fraction of pixels Canny marks as edges - proxy for visual busyness.
            "edge_density": np.sum(edges > 0) / edges.size,
            # Both normalized to [0, 1].
            "brightness": float(np.mean(gray) / 255),
            "contrast": float(np.std(gray) / 255)
        }
    except Exception as e:
        return {"error": str(e)}
|
| 345 |
+
|
| 346 |
+
def _get_dominant_colors(self, colors: np.ndarray, n_colors: int = 5) -> List[List[int]]:
|
| 347 |
+
"""Get dominant colors from image"""
|
| 348 |
+
try:
|
| 349 |
+
from sklearn.cluster import KMeans
|
| 350 |
+
kmeans = KMeans(n_clusters=min(n_colors, len(np.unique(colors, axis=0))), random_state=42)
|
| 351 |
+
kmeans.fit(colors)
|
| 352 |
+
return [color.astype(int).tolist() for color in kmeans.cluster_centers_]
|
| 353 |
+
except:
|
| 354 |
+
# Fallback without sklearn
|
| 355 |
+
unique_colors = np.unique(colors, axis=0)
|
| 356 |
+
return unique_colors[:n_colors].tolist()
|
| 357 |
+
|
| 358 |
+
def _analyze_layout(self, image: Image.Image) -> Dict[str, Any]:
    """Analyze chart layout and structure"""
    try:
        # Work on a grayscale copy for line detection.
        image_np = np.array(image.convert('L'))

        # Find potential axes
        h_lines = self._detect_horizontal_lines(image_np)
        v_lines = self._detect_vertical_lines(image_np)

        return {
            "horizontal_lines": len(h_lines),
            "vertical_lines": len(v_lines),
            # Heuristic: several lines in both directions suggests a gridded plot.
            "has_grid": len(h_lines) > 2 and len(v_lines) > 2,
            "image_regions": self._identify_regions(image_np)
        }
    except Exception as e:
        return {"error": str(e)}
|
| 375 |
+
|
| 376 |
+
def _detect_horizontal_lines(self, gray_image: np.ndarray) -> List:
    """Detect horizontal lines in image"""
    # Morphological opening with a wide, 1px-tall kernel keeps only long
    # horizontal structures (axes, gridlines).
    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 1))
    detected_lines = cv2.morphologyEx(gray_image, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
    # NOTE(review): findContours is documented for binary input; this passes
    # the opened grayscale image directly - confirm thresholding isn't needed.
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # findContours returns 2 values on OpenCV 4.x and 3 on 3.x; take contours either way.
    return cnts[0] if len(cnts) == 2 else cnts[1]
|
| 382 |
+
|
| 383 |
+
def _detect_vertical_lines(self, gray_image: np.ndarray) -> List:
    """Detect vertical lines in image"""
    # Mirror of _detect_horizontal_lines: a tall, 1px-wide kernel keeps only
    # long vertical structures (axes, gridlines).
    vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 25))
    detected_lines = cv2.morphologyEx(gray_image, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # findContours returns 2 values on OpenCV 4.x and 3 on 3.x; take contours either way.
    return cnts[0] if len(cnts) == 2 else cnts[1]
|
| 389 |
+
|
| 390 |
+
def _identify_regions(self, image: np.ndarray) -> Dict[str, Any]:
|
| 391 |
+
"""Identify different regions of the chart"""
|
| 392 |
+
h, w = image.shape
|
| 393 |
+
return {
|
| 394 |
+
"title_region": {"y": 0, "height": h // 10},
|
| 395 |
+
"chart_area": {"y": h // 10, "height": int(h * 0.7)},
|
| 396 |
+
"legend_area": {"y": int(h * 0.8), "height": h // 5},
|
| 397 |
+
"total_dimensions": {"width": w, "height": h}
|
| 398 |
+
}
|
| 399 |
+
|
| 400 |
+
def _extract_data_insights(self, image: Image.Image, analysis_data: Dict) -> Dict[str, Any]:
|
| 401 |
+
"""Extract data insights and patterns"""
|
| 402 |
+
insights = {
|
| 403 |
+
"numerical_data": [],
|
| 404 |
+
"categories": [],
|
| 405 |
+
"trends": [],
|
| 406 |
+
"outliers": [],
|
| 407 |
+
"summary_statistics": {}
|
| 408 |
+
}
|
| 409 |
+
|
| 410 |
+
try:
|
| 411 |
+
# Extract numerical values
|
| 412 |
+
numbers = analysis_data["text_extraction"]["detected_numbers"]
|
| 413 |
+
numerical_values = []
|
| 414 |
+
|
| 415 |
+
for num_data in numbers:
|
| 416 |
+
if num_data["type"] == "number":
|
| 417 |
+
try:
|
| 418 |
+
# Clean and convert number
|
| 419 |
+
clean_num = re.sub(r'[,\s]', '', num_data["value"])
|
| 420 |
+
value = float(clean_num)
|
| 421 |
+
numerical_values.append(value)
|
| 422 |
+
except:
|
| 423 |
+
continue
|
| 424 |
+
|
| 425 |
+
if numerical_values:
|
| 426 |
+
insights["numerical_data"] = numerical_values
|
| 427 |
+
insights["summary_statistics"] = {
|
| 428 |
+
"count": len(numerical_values),
|
| 429 |
+
"min": min(numerical_values),
|
| 430 |
+
"max": max(numerical_values),
|
| 431 |
+
"mean": np.mean(numerical_values),
|
| 432 |
+
"median": np.median(numerical_values),
|
| 433 |
+
"std": np.std(numerical_values) if len(numerical_values) > 1 else 0
|
| 434 |
+
}
|
| 435 |
|
| 436 |
+
# Categories from labels
|
| 437 |
+
insights["categories"] = analysis_data["text_extraction"]["detected_labels"]
|
| 438 |
|
| 439 |
+
return insights
|
| 440 |
+
|
| 441 |
+
except Exception as e:
|
| 442 |
+
insights["error"] = str(e)
|
| 443 |
+
return insights
|
| 444 |
+
|
| 445 |
+
def _assess_quality(self, image: Image.Image, analysis_data: Dict) -> Dict[str, Any]:
    """Score how readable and complete the analyzed chart appears to be.

    Combines a completeness score (title / numbers / labels / recognized
    chart type) with a readability score (amount of extracted text and
    number of OCR methods that succeeded) into overall_score in [0, 1].
    Errors are recorded under "error" rather than raised.
    """
    report = {
        "overall_score": 0.0,
        "readability": {},
        "completeness": {},
        "technical_quality": {}
    }

    try:
        text_info = analysis_data["text_extraction"]
        method_count = len(text_info["methods_used"])
        text_len = len(text_info["combined_text"])

        # How much the OCR stack managed to read.
        report["readability"] = {
            "text_extraction_methods": method_count,
            "text_length": text_len,
            "numbers_detected": len(text_info["detected_numbers"]),
            "labels_detected": len(text_info["detected_labels"])
        }

        # Which structural pieces of a chart we could identify.
        report["completeness"] = {
            "has_title": "title" in text_info["combined_text"].lower(),
            "has_numerical_data": len(text_info["detected_numbers"]) > 0,
            "has_labels": len(text_info["detected_labels"]) > 0,
            "chart_type_identified": analysis_data["chart_analysis"]["chart_type"] != "Unknown Chart Type"
        }

        # Raw image statistics, if the visual-elements pass succeeded.
        visuals = analysis_data["chart_analysis"].get("visual_elements", {})
        if not visuals.get("error"):
            report["technical_quality"] = {
                "image_brightness": visuals.get("brightness", 0),
                "image_contrast": visuals.get("contrast", 0),
                "color_diversity": visuals.get("color_count", 0),
                "edge_clarity": visuals.get("edge_density", 0)
            }

        # Weighted blend: completeness dominates, readability refines.
        completeness_score = sum(report["completeness"].values()) / len(report["completeness"])
        readability_score = min(1.0, (text_len / 100) * 0.5 + (method_count / 2) * 0.5)
        report["overall_score"] = (completeness_score * 0.6 + readability_score * 0.4)

    except Exception as e:
        report["error"] = str(e)

    return report
|
| 498 |
|
| 499 |
+
def _florence_advanced_analysis(self, image: Image.Image, custom_prompt: str = None) -> Dict[str, Any]:
    """Run a battery of Florence-2 vision tasks on the chart image.

    Executes object detection, dense captioning, OCR-with-regions and
    detailed captioning, plus an optional custom caption prompt. Each
    task's failure is isolated: the result dict maps task name -> parsed
    output or an {"error": ...} entry for that task alone.
    """
    if not self.models_loaded['florence']:
        return {"error": "Florence-2 model not available"}

    tasks = {
        "object_detection": "<OD>",
        "dense_caption": "<DENSE_REGION_CAPTION>",
        "ocr_with_regions": "<OCR_WITH_REGION>",
        "detailed_caption": "<MORE_DETAILED_CAPTION>"
    }
    if custom_prompt:
        tasks["custom_analysis"] = f"<CAPTION>{custom_prompt}"

    results = {}
    try:
        for task_name, task_prompt in tasks.items():
            try:
                batch = self.florence_processor(text=task_prompt, images=image, return_tensors="pt")
                generated = self.florence_model.generate(
                    input_ids=batch["input_ids"],
                    pixel_values=batch["pixel_values"],
                    max_new_tokens=1024,
                    num_beams=3,
                    do_sample=False
                )
                decoded = self.florence_processor.batch_decode(generated, skip_special_tokens=False)[0]
                results[task_name] = self._parse_florence_output(decoded, task_prompt)
            except Exception as e:
                results[task_name] = {"error": str(e)}

        return results

    except Exception as e:
        return {"error": f"Florence-2 analysis failed: {str(e)}"}
|
| 538 |
+
|
| 539 |
+
def _parse_florence_output(self, output: str, prompt: str) -> Dict[str, Any]:
    """Parse raw Florence-2 generation text into a structured dict.

    Strips the echoed task prompt from the decoded text; if the remainder
    looks like a JSON object it is decoded, otherwise the cleaned text is
    returned under "raw_output".

    Returns:
        Decoded JSON dict, or {"raw_output": text}; on unexpected failure
        {"error": ..., "raw_output": original text}.
    """
    try:
        # The model usually echoes the task token(s); drop them.
        if prompt in output:
            parsed_output = output.replace(prompt, "").strip()
        else:
            parsed_output = output.strip()

        # Region/detection tasks emit JSON-like objects — try to decode.
        if parsed_output.startswith('{') and parsed_output.endswith('}'):
            try:
                return json.loads(parsed_output)
            except json.JSONDecodeError:
                # FIX: was a bare `except:`; only malformed JSON is expected,
                # anything else should surface via the outer handler.
                pass

        return {"raw_output": parsed_output}

    except Exception as e:
        return {"error": str(e), "raw_output": output}
|
| 559 |
|
| 560 |
+
def format_results_for_display(self, structured_output: Dict[str, Any]) -> str:
    """Render the structured analysis dict as human-readable Markdown.

    Sections (image info, chart analysis, extracted text, data insights,
    quality, errors) are emitted only when present in the input dict.

    FIX: average/median are formatted through a numeric guard — the old
    code applied the ':.2f' float format spec to the 'N/A' fallback string,
    which raised ValueError whenever 'mean' or 'median' was missing.
    """
    def _fmt(value):
        # Two-decimal formatting for real numbers, 'N/A' for anything else.
        return f"{value:.2f}" if isinstance(value, (int, float)) else "N/A"

    formatted = "# π Enhanced Chart Analysis Results\n\n"

    # Metadata
    metadata = structured_output.get("metadata", {})
    formatted += f"**Analysis Type:** {metadata.get('analysis_type', 'Unknown')}\n"
    formatted += f"**Timestamp:** {metadata.get('timestamp', 'Unknown')}\n"
    formatted += f"**Models Used:** {', '.join(metadata.get('models_used', []))}\n\n"

    # Image info
    image_info = structured_output.get("image_info", {})
    if not image_info.get("error"):
        dims = image_info.get("dimensions", {})
        formatted += "## πΌοΈ Image Information\n"
        formatted += f"**Dimensions:** {dims.get('width', 'Unknown')} x {dims.get('height', 'Unknown')}\n"
        formatted += f"**Format:** {image_info.get('format', 'Unknown')}\n"
        formatted += f"**Aspect Ratio:** {image_info.get('aspect_ratio', 'Unknown')}\n\n"

    # Chart Analysis
    chart_analysis = structured_output.get("chart_analysis", {})
    formatted += "## π Chart Analysis\n"
    formatted += f"**Chart Type:** {chart_analysis.get('chart_type', 'Unknown')}\n"
    if chart_analysis.get("description"):
        formatted += f"**Description:** {chart_analysis['description']}\n\n"

    # Text Extraction
    text_extraction = structured_output.get("text_extraction", {})
    if text_extraction.get("combined_text"):
        formatted += "## π Extracted Text\n"
        formatted += f"**Methods Used:** {', '.join(text_extraction.get('methods_used', []))}\n"
        formatted += f"**Combined Text:** {text_extraction['combined_text']}\n"

        if text_extraction.get("detected_numbers"):
            formatted += f"**Numbers Found:** {len(text_extraction['detected_numbers'])}\n"

        if text_extraction.get("detected_labels"):
            formatted += f"**Labels Found:** {', '.join(text_extraction['detected_labels'])}\n\n"

    # Data Insights
    data_insights = structured_output.get("data_insights", {})
    if data_insights.get("summary_statistics"):
        stats = data_insights["summary_statistics"]
        formatted += "## π Data Insights\n"
        formatted += f"**Data Points:** {stats.get('count', 0)}\n"
        formatted += f"**Range:** {stats.get('min', 'N/A')} - {stats.get('max', 'N/A')}\n"
        formatted += f"**Average:** {_fmt(stats.get('mean'))}\n"
        formatted += f"**Median:** {_fmt(stats.get('median'))}\n\n"

    # Quality Assessment
    quality = structured_output.get("quality_metrics", {})
    if quality.get("overall_score") is not None:
        formatted += "## β Quality Assessment\n"
        formatted += f"**Overall Score:** {_fmt(quality['overall_score'])}/1.0\n"

        completeness = quality.get("completeness", {})
        if completeness:
            formatted += f"**Has Title:** {'Yes' if completeness.get('has_title') else 'No'}\n"
            formatted += f"**Has Data:** {'Yes' if completeness.get('has_numerical_data') else 'No'}\n"
            formatted += f"**Chart Type Identified:** {'Yes' if completeness.get('chart_type_identified') else 'No'}\n\n"

    # Errors
    errors = structured_output.get("errors", [])
    if errors:
        formatted += "## β οΈ Errors\n"
        for error in errors:
            formatted += f"- {error}\n"
        formatted += "\n"

    return formatted
|
| 630 |
|
| 631 |
+
# Initialize the enhanced analyzer as a module-level singleton shared by all
# Gradio event handlers below.
# NOTE(review): the constructor loads several large models (BLIP, TrOCR,
# EasyOCR, optionally Florence-2), so importing this module is slow and may
# download weights on first run.
analyzer = StructuredChartAnalyzer()
|
| 633 |
|
| 634 |
+
def analyze_with_structured_output(image, analysis_type, custom_prompt, include_florence):
    """Gradio wrapper: run the analyzer and package results for the UI.

    Args:
        image: PIL image from the Image component (may be None).
        analysis_type: Key into analyzer.prompt_templates.
        custom_prompt: Optional free-text prompt overriding analysis_type.
        include_florence: UI flag (not consumed here; the analyzer decides
            internally whether Florence-2 is available).

    Returns:
        (formatted_markdown, structured_result_dict, csv_string_or_None)
    """
    # FIX: an empty Gradio textbox can arrive as None — the old code called
    # .strip() unconditionally and raised AttributeError.
    if custom_prompt and custom_prompt.strip():
        prompt_to_use = custom_prompt
    else:
        prompt_to_use = None

    # Get structured output
    structured_result = analyzer.analyze_chart_with_prompt(
        image,
        custom_prompt=prompt_to_use,
        analysis_type=analysis_type
    )

    # Format for display
    formatted_display = analyzer.format_results_for_display(structured_result)

    # Create CSV data if possible
    csv_data = None
    data_insights = structured_result.get("data_insights", {})
    values = data_insights.get("numerical_data")
    if values:
        # FIX: pad (not only trim) the category column — the old slice left
        # it shorter than the values column when fewer labels were detected,
        # and pandas raises on unequal-length columns.
        categories = list(data_insights.get("categories", []))[:len(values)]
        categories += [""] * (len(values) - len(categories))
        df = pd.DataFrame({
            'Values': values,
            'Categories': categories
        })
        csv_buffer = io.StringIO()
        df.to_csv(csv_buffer, index=False)
        csv_data = csv_buffer.getvalue()

    return formatted_display, structured_result, csv_data
|
| 664 |
|
| 665 |
+
# Enhanced Gradio interface: two-column layout — configuration on the left,
# tabbed results on the right — with example prompts at the bottom.
with gr.Blocks(title="Enhanced Chart Analyzer with Structured Output", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# π Enhanced Chart Analyzer with Structured JSON Output")
    gr.Markdown("Upload a chart image and get comprehensive analysis with structured data output. Supports custom prompts and multiple AI models.")

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("## π Analysis Configuration")

            # Chart image to analyze.
            image_input = gr.Image(
                type="pil",
                label="Upload Chart Image",
                height=300
            )

            # Predefined analysis template selector (keys come from the
            # analyzer's prompt_templates dict).
            analysis_type = gr.Dropdown(
                choices=list(analyzer.prompt_templates.keys()),
                value="comprehensive",
                label="Analysis Type",
                info="Choose predefined analysis type or use custom prompt"
            )

            # Optional free-form prompt that overrides the template.
            custom_prompt = gr.Textbox(
                label="Custom Analysis Prompt",
                placeholder="Enter your custom analysis instructions here...",
                lines=3,
                info="Optional: Override the selected analysis type with a custom prompt"
            )

            with gr.Accordion("Prompt Templates", open=False):
                template_display = gr.Markdown()

            def update_template_display(analysis_type):
                # Show the raw template text for the selected analysis type.
                return f"**{analysis_type.title()} Template:**\n\n{analyzer.prompt_templates.get(analysis_type, 'No template available')}"

            analysis_type.change(update_template_display, inputs=[analysis_type], outputs=[template_display])

            with gr.Accordion("Advanced Settings", open=False):
                include_florence = gr.Checkbox(
                    label="Use Florence-2 Advanced Analysis",
                    value=True,
                    info="Include advanced computer vision analysis (if model available)"
                )

                # NOTE(review): this threshold is passed to the analyze handler
                # but does not appear to be consumed there — confirm intent.
                confidence_threshold = gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=0.5,
                    label="OCR Confidence Threshold"
                )

            analyze_btn = gr.Button("π Analyze Chart", variant="primary", size="lg")
            clear_btn = gr.Button("ποΈ Clear All", variant="secondary")

        with gr.Column(scale=2):
            gr.Markdown("## π Analysis Results")

            with gr.Tabs():
                with gr.Tab("π Formatted Results"):
                    formatted_output = gr.Markdown(
                        value="Upload an image and click 'Analyze Chart' to see results here.",
                        label="Analysis Results"
                    )

                with gr.Tab("π§ Structured JSON"):
                    json_output = gr.JSON(
                        label="Complete Structured Output",
                        show_label=True
                    )

                with gr.Tab("π Data Export"):
                    gr.Markdown("### Export Options")

                    with gr.Row():
                        json_download = gr.File(
                            label="Download JSON Results",
                            visible=False
                        )
                        csv_download = gr.File(
                            label="Download CSV Data",
                            visible=False
                        )

                    export_btn = gr.Button("π₯ Generate Export Files")
                    export_status = gr.Textbox(label="Export Status", interactive=False)

    # Example section
    gr.Markdown("## π― Example Prompts")

    # Each entry fills (custom_prompt, analysis_type) when clicked.
    example_prompts = [
        ["What are the main trends shown in this chart?", "trend_analysis"],
        ["Extract all numerical data points and their labels", "data_extraction"],
        ["Describe this chart for accessibility purposes", "accessibility"],
        ["What business insights can be derived from this data?", "business_insights"],
        ["Analyze the performance metrics shown in this dashboard", "comprehensive"]
    ]

    gr.Examples(
        examples=example_prompts,
        inputs=[custom_prompt, analysis_type],
        label="Try these example prompts:"
    )
|
| 767 |
|
| 768 |
+
# Event handlers
|
| 769 |
+
def analyze_chart_comprehensive(image, analysis_type, custom_prompt, include_florence, confidence_threshold):
|
| 770 |
+
"""Main analysis function with all parameters"""
|
| 771 |
+
if image is None:
|
| 772 |
+
return "Please upload an image first.", {}, "No data to export", "No data to export"
|
| 773 |
+
|
| 774 |
try:
|
| 775 |
+
# Get structured output
|
| 776 |
+
structured_result = analyzer.analyze_chart_with_prompt(
|
| 777 |
+
image,
|
| 778 |
+
custom_prompt=custom_prompt.strip() if custom_prompt.strip() else None,
|
| 779 |
+
analysis_type=analysis_type
|
| 780 |
+
)
|
| 781 |
+
|
| 782 |
+
# Format for display
|
| 783 |
+
formatted_display = analyzer.format_results_for_display(structured_result)
|
| 784 |
+
|
| 785 |
+
return formatted_display, structured_result, "β
Analysis completed successfully", "Ready for export"
|
| 786 |
+
|
| 787 |
except Exception as e:
|
| 788 |
+
error_msg = f"β Analysis failed: {str(e)}"
|
| 789 |
+
return error_msg, {"error": str(e)}, error_msg, error_msg
|
| 790 |
|
| 791 |
+
def generate_export_files(json_data):
|
| 792 |
+
"""Generate downloadable export files"""
|
| 793 |
+
if not json_data or json_data.get("error"):
|
| 794 |
+
return None, None, "β No valid data to export"
|
| 795 |
|
| 796 |
try:
|
| 797 |
+
# Generate JSON file
|
| 798 |
+
json_str = json.dumps(json_data, indent=2, default=str)
|
| 799 |
+
json_file = io.StringIO(json_str)
|
| 800 |
|
| 801 |
+
# Generate CSV file if numerical data exists
|
| 802 |
+
csv_file = None
|
| 803 |
+
data_insights = json_data.get("data_insights", {})
|
|
|
|
|
|
|
|
|
|
|
|
|
| 804 |
|
| 805 |
+
if data_insights.get("numerical_data"):
|
| 806 |
+
df_data = {
|
| 807 |
+
'Numerical_Values': data_insights["numerical_data"]
|
| 808 |
+
}
|
| 809 |
+
|
| 810 |
+
# Add categories if available
|
| 811 |
+
categories = data_insights.get("categories", [])
|
| 812 |
+
if categories:
|
| 813 |
+
# Pad or trim categories to match numerical data length
|
| 814 |
+
num_values = len(data_insights["numerical_data"])
|
| 815 |
+
if len(categories) < num_values:
|
| 816 |
+
categories.extend([""] * (num_values - len(categories)))
|
| 817 |
+
else:
|
| 818 |
+
categories = categories[:num_values]
|
| 819 |
+
df_data['Categories'] = categories
|
| 820 |
+
|
| 821 |
+
# Add detected numbers with metadata
|
| 822 |
+
detected_numbers = json_data.get("text_extraction", {}).get("detected_numbers", [])
|
| 823 |
+
if detected_numbers:
|
| 824 |
+
# Create a summary of detected numbers
|
| 825 |
+
number_summary = []
|
| 826 |
+
for num_data in detected_numbers:
|
| 827 |
+
number_summary.append({
|
| 828 |
+
'Value': num_data.get('value', ''),
|
| 829 |
+
'Type': num_data.get('type', ''),
|
| 830 |
+
'Position': str(num_data.get('position', ''))
|
| 831 |
+
})
|
| 832 |
+
|
| 833 |
+
# Convert to DataFrame
|
| 834 |
+
numbers_df = pd.DataFrame(number_summary)
|
| 835 |
+
csv_buffer = io.StringIO()
|
| 836 |
+
numbers_df.to_csv(csv_buffer, index=False)
|
| 837 |
+
csv_file = csv_buffer.getvalue()
|
| 838 |
+
else:
|
| 839 |
+
# Fallback CSV with basic data
|
| 840 |
+
df = pd.DataFrame(df_data)
|
| 841 |
+
csv_buffer = io.StringIO()
|
| 842 |
+
df.to_csv(csv_buffer, index=False)
|
| 843 |
+
csv_file = csv_buffer.getvalue()
|
| 844 |
|
| 845 |
+
return json_str, csv_file, "β
Export files generated successfully"
|
| 846 |
|
| 847 |
except Exception as e:
|
| 848 |
+
return None, None, f"β Export failed: {str(e)}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 849 |
|
| 850 |
+
def clear_all_inputs():
|
| 851 |
+
"""Clear all inputs and outputs"""
|
| 852 |
+
return (
|
| 853 |
+
None, # image
|
| 854 |
+
"Upload an image and click 'Analyze Chart' to see results here.", # formatted output
|
| 855 |
+
{}, # json output
|
| 856 |
+
"No data to export", # export status
|
| 857 |
+
"", # custom prompt
|
| 858 |
+
None, # json download
|
| 859 |
+
None # csv download
|
| 860 |
+
)
|
| 861 |
+
|
| 862 |
+
    # Connect event handlers
    analyze_btn.click(
        fn=analyze_chart_comprehensive,
        inputs=[image_input, analysis_type, custom_prompt, include_florence, confidence_threshold],
        # NOTE(review): export_status is listed twice while the handler
        # returns four distinct values — the two status returns write to the
        # same component; confirm this duplication is intended.
        outputs=[formatted_output, json_output, export_status, export_status]
    )

    export_btn.click(
        fn=generate_export_files,
        inputs=[json_output],
        outputs=[json_download, csv_download, export_status]
    )

    clear_btn.click(
        fn=clear_all_inputs,
        outputs=[image_input, formatted_output, json_output, export_status, custom_prompt, json_download, csv_download]
    )

    # Initialize template display
    # NOTE(review): assigning .value after construction may not refresh an
    # already-rendered component; demo.load(...) is the usual idiom — confirm.
    template_display.value = update_template_display("comprehensive")
|
| 882 |
+
|
| 883 |
+
# Additional helper functions for advanced features
def load_image_from_url(url):
    """Fetch an image over HTTP and return (PIL image, status message).

    On any failure — network error, non-2xx status, undecodable payload —
    the image slot is None and the message describes the error.
    """
    try:
        resp = requests.get(url, timeout=10)
        resp.raise_for_status()
        loaded = Image.open(io.BytesIO(resp.content))
        return loaded, "β Image loaded successfully from URL"
    except Exception as e:
        return None, f"β Failed to load image: {str(e)}"
|
| 893 |
+
|
| 894 |
+
# Add URL loading capability: re-enter the Blocks context to append an
# accordion that fills image_input from a remote URL.
with demo:
    with gr.Accordion("π Load from URL", open=False):
        url_input = gr.Textbox(
            label="Image URL",
            placeholder="https://example.com/chart.png"
        )
        load_url_btn = gr.Button("π₯ Load from URL")

        # Status messages are surfaced through the existing export_status box.
        load_url_btn.click(
            fn=load_image_from_url,
            inputs=[url_input],
            outputs=[image_input, export_status]
        )
|
| 908 |
|
| 909 |
# Script entry point: launch the Gradio app with explicit server settings,
# falling back to default launch options if that fails (e.g. port in use).
if __name__ == "__main__":
    print("π Starting Enhanced Chart Analyzer...")
    print("π Features:")
    print(" - Structured JSON output")
    print(" - Custom analysis prompts")
    print(" - Multiple AI models (BLIP, TrOCR, EasyOCR, Florence-2)")
    print(" - Data export capabilities")
    print(" - Quality assessment")
    print(" - Advanced visual analysis")

    try:
        demo.launch(
            server_name="0.0.0.0",  # listen on all interfaces (container-friendly)
            server_port=7860,
            share=False,
            show_error=True,
            debug=True
        )
    except Exception as e:
        print(f"β Error launching app: {e}")
        print("π Trying fallback launch...")
        demo.launch()
|