ravi-vc committed on
Commit
3dfe862
Β·
verified Β·
1 Parent(s): 9555a2d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +313 -294
app.py CHANGED
@@ -1,381 +1,400 @@
1
- import asyncio
2
- import websockets
3
  import gradio as gr
4
  import torch
5
- from transformers import (
6
- BlipProcessor, BlipForConditionalGeneration,
7
- TrOCRProcessor, VisionEncoderDecoderModel,
8
- AutoProcessor, AutoModelForCausalLM
9
- )
10
  from PIL import Image
11
- import easyocr
12
- import matplotlib.pyplot as plt
13
- import pandas as pd
14
  import numpy as np
15
- import cv2
16
  import io
17
- import base64
18
  import requests
19
  import warnings
20
 
21
- # Suppress warnings
22
  warnings.filterwarnings("ignore")
23
 
24
  class ChartAnalyzer:
25
  def __init__(self):
26
- # Load models
27
- self.load_models()
28
 
29
- def load_models(self):
30
- """Load all required models"""
31
  try:
32
- print("Loading BLIP model...")
33
- # BLIP for image captioning and understanding
34
  self.blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
35
  self.blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
36
-
37
- print("Loading TrOCR model...")
38
- # TrOCR for text extraction
39
- self.trocr_processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed")
40
- self.trocr_model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-printed")
41
-
42
- print("Loading EasyOCR...")
43
- # EasyOCR for backup text extraction
44
- self.ocr_reader = easyocr.Reader(['en'], gpu=False) # Force CPU to avoid GPU issues
45
-
46
- # Florence-2 for advanced understanding (optional)
47
- try:
48
- print("Attempting to load Florence-2...")
49
- self.florence_processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
50
- self.florence_model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
51
- self.florence_available = True
52
- print("Florence-2 loaded successfully!")
53
- except Exception as e:
54
- print(f"Florence-2 not available: {e}")
55
- self.florence_available = False
56
-
57
- print("All models loaded successfully!")
58
-
59
  except Exception as e:
60
- print(f"Error loading models: {e}")
61
  raise e
62
 
63
- def analyze_chart(self, image, analysis_type="comprehensive"):
64
- """Main function to analyze charts"""
65
  if image is None:
66
- return "Please upload an image first."
67
-
68
- results = {}
69
 
70
  try:
71
- # Convert to PIL Image if needed
72
  if not isinstance(image, Image.Image):
73
- image = Image.fromarray(image).convert('RGB')
74
-
75
- # Basic image understanding with BLIP
76
- results['description'] = self.get_image_description(image)
77
-
78
- # Extract text using multiple methods
79
- results['extracted_text'] = self.extract_text_multi_method(image)
80
 
81
- # Chart type detection
82
- results['chart_type'] = self.detect_chart_type(image, results['description'])
83
 
84
- # Data extraction (if possible)
85
- if analysis_type in ["comprehensive", "data_extraction"]:
86
- results['data_points'] = self.extract_data_points(image, results['chart_type'])
 
 
87
 
88
- # Advanced analysis with Florence-2 (if available)
89
- if self.florence_available and analysis_type == "comprehensive":
90
- results['advanced_analysis'] = self.florence_analysis(image)
91
-
92
- return self.format_results(results)
 
 
93
 
94
  except Exception as e:
95
- return f"Error analyzing chart: {str(e)}"
96
 
97
- def get_image_description(self, image):
98
- """Get image description using BLIP"""
99
  try:
100
  inputs = self.blip_processor(image, return_tensors="pt")
101
- out = self.blip_model.generate(**inputs, max_length=100)
102
- description = self.blip_processor.decode(out[0], skip_special_tokens=True)
103
- return description
104
- except:
105
- return "Unable to generate description"
106
 
107
- def extract_text_multi_method(self, image):
108
- """Extract text using multiple OCR methods"""
109
- extracted_texts = {}
 
 
 
 
110
 
111
- # Method 1: TrOCR
112
- try:
113
- pixel_values = self.trocr_processor(image, return_tensors="pt").pixel_values
114
- generated_ids = self.trocr_model.generate(pixel_values)
115
- trocr_text = self.trocr_processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
116
- extracted_texts['TrOCR'] = trocr_text
117
- except:
118
- extracted_texts['TrOCR'] = "Failed"
119
-
120
- # Method 2: EasyOCR
121
- try:
122
- # Convert PIL to numpy array
123
- image_np = np.array(image)
124
- ocr_results = self.ocr_reader.readtext(image_np)
125
- easyocr_text = ' '.join([result[1] for result in ocr_results])
126
- extracted_texts['EasyOCR'] = easyocr_text
127
- except:
128
- extracted_texts['EasyOCR'] = "Failed"
129
 
130
- return extracted_texts
131
 
132
- def detect_chart_type(self, image, description):
133
- """Detect chart type based on image analysis"""
134
- description_lower = description.lower()
135
 
136
- chart_keywords = {
137
- 'bar_chart': ['bar', 'column', 'histogram'],
138
- 'line_chart': ['line', 'trend', 'time series'],
139
- 'pie_chart': ['pie', 'circular', 'slice'],
140
- 'scatter_plot': ['scatter', 'correlation', 'points'],
141
- 'area_chart': ['area', 'filled'],
142
- 'box_plot': ['box', 'whisker'],
143
- 'heatmap': ['heat', 'color coded', 'matrix']
 
 
144
  }
145
 
146
- for chart_type, keywords in chart_keywords.items():
147
- if any(keyword in description_lower for keyword in keywords):
148
- return chart_type.replace('_', ' ').title()
149
 
150
- return "Unknown Chart Type"
151
 
152
- def extract_data_points(self, image, chart_type):
153
- """Attempt to extract data points (simplified approach)"""
154
- try:
155
- # This is a simplified version - real implementation would be more sophisticated
156
- # Convert to grayscale for analysis
157
- image_np = np.array(image.convert('L'))
158
-
159
- # Basic edge detection
160
- edges = cv2.Canny(image_np, 50, 150)
161
-
162
- # Find contours
163
- contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
164
-
165
- data_info = {
166
- 'contours_found': len(contours),
167
- 'image_dimensions': image_np.shape,
168
- 'note': 'This is a simplified data extraction. Advanced algorithms needed for accurate data point extraction.'
169
- }
170
-
171
- return data_info
172
-
173
- except Exception as e:
174
- return f"Data extraction failed: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
 
176
- def florence_analysis(self, image):
177
- """Advanced analysis using Florence-2"""
178
- if not self.florence_available:
179
- return "Florence-2 model not available"
180
 
181
- try:
182
- # Florence-2 prompts for different tasks
183
- prompts = [
184
- "<OD>", # Object Detection
185
- "<DENSE_REGION_CAPTION>", # Dense captioning
186
- "<OCR_WITH_REGION>" # OCR with regions
187
- ]
188
-
189
- results = {}
190
- for prompt in prompts:
191
- inputs = self.florence_processor(text=prompt, images=image, return_tensors="pt")
192
- generated_ids = self.florence_model.generate(
193
- input_ids=inputs["input_ids"],
194
- pixel_values=inputs["pixel_values"],
195
- max_new_tokens=1024,
196
- num_beams=3
197
- )
198
- generated_text = self.florence_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
199
- results[prompt] = generated_text
200
-
201
- return results
202
- except:
203
- return "Florence-2 analysis failed"
204
 
205
- def format_results(self, results):
206
- """Format results for display"""
207
- formatted = "# Chart Analysis Results\n\n"
208
 
209
- if 'description' in results:
210
- formatted += f"## Image Description\n{results['description']}\n\n"
 
211
 
212
- if 'chart_type' in results:
213
- formatted += f"## Chart Type\n{results['chart_type']}\n\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214
 
215
- if 'extracted_text' in results:
216
- formatted += "## Extracted Text\n"
217
- for method, text in results['extracted_text'].items():
218
- formatted += f"**{method}:** {text}\n\n"
 
 
 
 
 
219
 
220
- if 'data_points' in results:
221
- formatted += f"## Data Analysis\n{results['data_points']}\n\n"
 
222
 
223
- if 'advanced_analysis' in results:
224
- formatted += f"## Advanced Analysis\n{results['advanced_analysis']}\n\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
225
 
226
- return formatted
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
227
 
228
- # Initialize the analyzer
229
- analyzer = ChartAnalyzer()
 
230
 
231
- # Create Gradio interface
232
- def analyze_uploaded_chart(image, analysis_type):
233
- return analyzer.analyze_chart(image, analysis_type)
234
 
235
- # Create the Gradio app
236
- with gr.Blocks(title="Chart Analyzer & Data Extractor", theme=gr.themes.Soft()) as demo:
237
- gr.Markdown("# πŸ“Š Chart Analyzer & Data Extractor")
238
- gr.Markdown("Upload a chart image to extract data and analyze its contents using multiple AI models including BLIP, TrOCR, and Florence-2.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
239
 
240
  with gr.Row():
 
241
  with gr.Column(scale=1):
242
- gr.Markdown("## πŸ“ Upload Your Chart")
243
-
244
- # Multiple upload options
245
- with gr.Tabs():
246
- with gr.Tab("πŸ“€ Upload Image"):
247
- image_input = gr.Image(
248
- type="pil",
249
- label="Upload Chart Image",
250
- height=400,
251
- sources=["upload", "webcam", "clipboard"],
252
- format="png"
253
- )
254
- gr.Markdown("**Supported formats:** PNG, JPG, JPEG, GIF, BMP")
255
- gr.Markdown("**Max size:** 10MB")
256
-
257
- with gr.Tab("πŸ”— From URL"):
258
- url_input = gr.Textbox(
259
- label="Image URL",
260
- placeholder="https://example.com/chart.png"
261
- )
262
- load_url_btn = gr.Button("Load from URL")
263
 
264
- # Analysis options
265
- gr.Markdown("## βš™οΈ Analysis Settings")
266
- analysis_type = gr.Dropdown(
267
- choices=["basic", "comprehensive", "data_extraction"],
268
- value="comprehensive",
269
- label="Analysis Type",
270
- info="Choose the depth of analysis"
271
- )
272
-
273
- with gr.Accordion("Advanced Options", open=False):
274
- confidence_threshold = gr.Slider(
275
- minimum=0.1,
276
- maximum=1.0,
277
- value=0.5,
278
- label="OCR Confidence Threshold"
279
  )
280
- use_florence = gr.Checkbox(
281
- label="Use Florence-2 (Advanced Analysis)",
282
- value=True
 
 
283
  )
 
 
 
 
 
 
 
 
284
 
285
- analyze_btn = gr.Button("πŸ” Analyze Chart", variant="primary", size="lg")
286
- clear_btn = gr.Button("πŸ—‘οΈ Clear All", variant="secondary")
 
 
287
 
 
288
  with gr.Column(scale=2):
289
- gr.Markdown("## πŸ“Š Analysis Results")
290
  output = gr.Markdown(
291
- value="Upload an image and click 'Analyze Chart' to see results here.",
292
- label="Results"
293
  )
294
-
295
- # Additional output components
296
- with gr.Accordion("Raw Data Export", open=False):
297
- json_output = gr.JSON(label="Structured Data")
298
- csv_download = gr.File(label="Download CSV", visible=False)
299
-
300
- # Function to load image from URL
301
- def load_image_from_url(url):
302
- try:
303
- import requests
304
- response = requests.get(url)
305
- response.raise_for_status()
306
- image = Image.open(io.BytesIO(response.content))
307
- return image, "Image loaded successfully!"
308
- except Exception as e:
309
- return None, f"Error loading image: {str(e)}"
310
-
311
- # Enhanced analysis function
312
- def analyze_uploaded_chart(image, analysis_type, confidence_threshold, use_florence):
313
- if image is None:
314
- return "Please upload an image first.", {}, None
315
-
316
- try:
317
- result = analyzer.analyze_chart(image, analysis_type)
318
-
319
- # Create structured data for JSON output
320
- structured_data = {
321
- "analysis_type": analysis_type,
322
- "confidence_threshold": confidence_threshold,
323
- "models_used": ["BLIP", "TrOCR", "EasyOCR"],
324
- "timestamp": pd.Timestamp.now().isoformat()
325
- }
326
-
327
- if use_florence and analyzer.florence_available:
328
- structured_data["models_used"].append("Florence-2")
329
-
330
- return result, structured_data, None
331
-
332
- except Exception as e:
333
- error_msg = f"Error analyzing chart: {str(e)}"
334
- return error_msg, {"error": error_msg}, None
335
 
336
- # Clear function
337
- def clear_all():
338
- return None, "Upload an image and click 'Analyze Chart' to see results here.", {}, None
339
-
340
- # Examples
341
- gr.Examples(
342
  examples=[
343
- ["https://via.placeholder.com/600x400/0066CC/FFFFFF?text=Sample+Bar+Chart", "comprehensive"],
344
- ["https://via.placeholder.com/600x400/FF6B35/FFFFFF?text=Sample+Line+Chart", "data_extraction"],
345
  ],
346
  inputs=[image_input, analysis_type],
347
- label="Try these examples:"
348
  )
349
 
350
  # Event handlers
351
  analyze_btn.click(
352
- fn=analyze_uploaded_chart,
353
- inputs=[image_input, analysis_type, confidence_threshold, use_florence],
354
- outputs=[output, json_output, csv_download]
355
  )
356
 
357
- load_url_btn.click(
358
  fn=load_image_from_url,
359
  inputs=[url_input],
360
  outputs=[image_input, output]
361
  )
362
 
363
  clear_btn.click(
364
- fn=clear_all,
365
- outputs=[image_input, output, json_output, csv_download]
366
  )
367
 
 
368
  if __name__ == "__main__":
369
- print("Starting Chart Analyzer...")
370
  try:
371
- demo.launch(
372
  server_name="0.0.0.0",
373
  server_port=7860,
374
- share=False,
375
- show_error=True,
376
- quiet=False
377
  )
378
  except Exception as e:
379
- print(f"Error launching app: {e}")
380
- # Fallback launch
381
- demo.launch()
 
 
 
1
  import gradio as gr
2
  import torch
3
+ from transformers import BlipProcessor, BlipForConditionalGeneration
 
 
 
 
4
  from PIL import Image
 
 
 
5
  import numpy as np
 
6
  import io
 
7
  import requests
8
  import warnings
9
 
 
10
  warnings.filterwarnings("ignore")
11
 
12
class ChartAnalyzer:
    """Chart/graph analyzer built on the BLIP vision-language model.

    Loads Salesforce's BLIP image-captioning checkpoint once at construction
    and exposes ``analyze_chart`` which returns a Markdown report at one of
    three depths: "basic", "detailed" or "comprehensive".
    """

    # Keyword table for heuristic chart-type detection; insertion order is the
    # match priority (first key whose keyword appears in the text wins).
    _CHART_TYPE_KEYWORDS = {
        '📊 Bar Chart': ['bar', 'bars', 'column', 'columns', 'histogram'],
        '📈 Line Chart': ['line', 'lines', 'trend', 'time series', 'linear', 'curve'],
        '🥧 Pie Chart': ['pie', 'circular', 'slice', 'slices', 'donut', 'wheel'],
        '📉 Scatter Plot': ['scatter', 'points', 'dots', 'correlation', 'plotted'],
        '📋 Table/Data': ['table', 'data', 'rows', 'columns', 'grid'],
        '🔄 Flowchart': ['flow', 'diagram', 'process', 'boxes', 'flowchart'],
        '📊 Area Chart': ['area', 'filled', 'shaded region'],
        '📦 Box Plot': ['box', 'whisker', 'quartile'],
        '🌡️ Heatmap': ['heat', 'color coded', 'matrix', 'intensity']
    }

    # Color names searched for when generating insights.
    _COLOR_NAMES = ['blue', 'red', 'green', 'yellow', 'orange', 'purple', 'pink']

    def __init__(self):
        print("🚀 Initializing Chart Analyzer with BLIP...")
        self.load_blip_model()

    def load_blip_model(self):
        """Load BLIP model for image understanding.

        Raises:
            Exception: re-raises whatever ``from_pretrained`` raised (network,
                cache or checkpoint errors) after logging it.
        """
        try:
            print("📦 Loading BLIP model...")
            self.blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
            self.blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
            print("✅ BLIP model loaded successfully!")
        except Exception as e:
            print(f"❌ Error loading BLIP model: {e}")
            # Bare `raise` preserves the original traceback (raise e would
            # rewrite the traceback origin to this line).
            raise

    def analyze_chart(self, image, analysis_type="detailed"):
        """Analyze a chart image and return a Markdown report.

        Args:
            image: PIL image or numpy array (converted to RGB PIL internally);
                None is tolerated and reported as an error string.
            analysis_type: "basic", "detailed" or "comprehensive"; any other
                value falls through to the comprehensive report.

        Returns:
            str: Markdown-formatted analysis, or an error message string.
        """
        if image is None:
            return "❌ Please upload an image first."

        try:
            # Ensure PIL Image format
            if not isinstance(image, Image.Image):
                if isinstance(image, np.ndarray):
                    image = Image.fromarray(image)
            # BLIP expects 3-channel input; normalize mode unconditionally.
            image = image.convert('RGB')

            print(f"🔍 Analyzing image: {image.size} pixels")

            # Get comprehensive analysis
            basic_caption = self.get_basic_caption(image)
            detailed_analysis = self.get_detailed_analysis(image)
            chart_type = self.detect_chart_type(basic_caption + " " + detailed_analysis)
            insights = self.generate_insights(basic_caption, detailed_analysis, chart_type)

            # Format results based on analysis type
            if analysis_type == "basic":
                return self.format_basic_results(basic_caption, chart_type)
            elif analysis_type == "detailed":
                return self.format_detailed_results(basic_caption, detailed_analysis, chart_type, insights, image)
            else:  # comprehensive
                return self.format_comprehensive_results(basic_caption, detailed_analysis, chart_type, insights, image)

        except Exception as e:
            return f"❌ Error analyzing chart: {str(e)}"

    def get_basic_caption(self, image):
        """Return a short unconditional BLIP caption for *image*."""
        try:
            inputs = self.blip_processor(image, return_tensors="pt")
            out = self.blip_model.generate(**inputs, max_length=50)
            caption = self.blip_processor.decode(out[0], skip_special_tokens=True)
            return caption
        except Exception as e:
            return f"Caption generation failed: {str(e)}"

    def get_detailed_analysis(self, image):
        """Probe the image with chart-oriented questions via BLIP VQA-style prompting.

        Returns the first two non-empty answers joined with " | ", or a
        fallback string when every question fails.
        """
        questions = [
            "What type of chart or graph is shown in this image?",
            "What are the main elements visible in this chart?",
            "What data or information does this visualization show?"
        ]

        analyses = []
        for question in questions:
            # Per-question failures are tolerated; remaining questions still run.
            try:
                inputs = self.blip_processor(image, question, return_tensors="pt")
                # do_sample/top_k makes answers non-deterministic — presumably
                # intentional for variety; TODO confirm determinism isn't needed.
                out = self.blip_model.generate(**inputs, max_length=100, do_sample=True, top_k=50)
                analysis = self.blip_processor.decode(out[0], skip_special_tokens=True)
                # Remove the question from the response
                analysis = analysis.replace(question, "").strip()
                if analysis:
                    analyses.append(analysis)
            except Exception:
                # Narrowed from a bare except so Ctrl-C/SystemExit still propagate.
                continue

        return " | ".join(analyses[:2]) if analyses else "Unable to generate detailed analysis"

    def detect_chart_type(self, text):
        """Classify the chart type from caption/analysis text via keyword matching."""
        text_lower = text.lower()

        for chart_type, keywords in self._CHART_TYPE_KEYWORDS.items():
            if any(keyword in text_lower for keyword in keywords):
                return chart_type

        return "📊 Data Visualization"

    def generate_insights(self, caption, analysis, chart_type):
        """Derive up to four human-readable insights from the text and chart type."""
        insights = []
        text = (caption + " " + analysis).lower()

        # Color insights
        found_colors = [color for color in self._COLOR_NAMES if color in text]
        if found_colors:
            insights.append(f"🎨 Uses {', '.join(found_colors)} color scheme")

        # Trend insights
        if any(word in text for word in ['increasing', 'rising', 'upward', 'growth']):
            insights.append("📈 Shows increasing/upward trend")
        elif any(word in text for word in ['decreasing', 'falling', 'downward', 'decline']):
            insights.append("📉 Shows decreasing/downward trend")

        # Data insights
        if any(word in text for word in ['multiple', 'several', 'many', 'various']):
            insights.append("📊 Contains multiple data series or categories")

        if 'comparison' in text or 'compare' in text:
            insights.append("⚖️ Designed for comparison analysis")

        # Chart-specific insights
        if 'Bar Chart' in chart_type:
            insights.append("📊 Effective for categorical data comparison")
        elif 'Line Chart' in chart_type:
            insights.append("📈 Shows trends over continuous data")
        elif 'Pie Chart' in chart_type:
            insights.append("🥧 Represents parts of a whole")

        return insights[:4]  # Limit to 4 insights

    def format_basic_results(self, caption, chart_type):
        """Render the minimal Markdown report (caption + chart type)."""
        return f"""# 📊 Basic Chart Analysis

## 🤖 AI Description
{caption}

## 📈 Chart Type
{chart_type}

---
*Powered by BLIP (Bootstrapping Language-Image Pre-training)*
"""

    def format_detailed_results(self, caption, analysis, chart_type, insights, image):
        """Render the mid-depth Markdown report including insights and image stats."""
        insight_text = "\n".join([f"• {insight}" for insight in insights])

        return f"""# 📊 Detailed Chart Analysis

## 🤖 AI Description
**Basic Caption:** {caption}

**Detailed Analysis:** {analysis}

## 📈 Chart Type
{chart_type}

## 💡 Key Insights
{insight_text if insight_text.strip() else "• Analysis provides valuable data visualization insights"}

## 📐 Technical Details
- **Image Size:** {image.size[0]} × {image.size[1]} pixels
- **Color Mode:** {image.mode}
- **Analysis Model:** BLIP (Salesforce)

---
*Analysis completed using state-of-the-art vision-language AI*
"""

    def format_comprehensive_results(self, caption, analysis, chart_type, insights, image):
        """Render the full Markdown report with recommendations and tech specs."""
        insight_text = "\n".join([f"• {insight}" for insight in insights])

        # Additional comprehensive analysis
        recommendations = self.get_recommendations(chart_type, insights)
        rec_text = "\n".join([f"• {rec}" for rec in recommendations])

        return f"""# 📊 Comprehensive Chart Analysis

## 🤖 AI-Powered Description
**Quick Summary:** {caption}

**Detailed Analysis:** {analysis}

## 📈 Chart Classification
{chart_type}

## 💡 Visual Insights
{insight_text if insight_text.strip() else "• Professional data visualization with clear visual elements"}

## 🎯 Recommendations
{rec_text}

## 📊 Technical Specifications
- **Resolution:** {image.size[0]} × {image.size[1]} pixels
- **Aspect Ratio:** {round(image.size[0]/image.size[1], 2)}:1
- **Color Depth:** {image.mode}
- **File Format:** {image.format or 'Unknown'}

## 🔧 Analysis Details
- **Primary Model:** BLIP (Bootstrapping Language-Image Pre-training)
- **Publisher:** Salesforce Research
- **Capability:** Vision-Language Understanding
- **Processing:** Real-time AI analysis

---
*🚀 Advanced chart analysis powered by cutting-edge AI technology*
"""

    def get_recommendations(self, chart_type, insights):
        """Generate up to three recommendations based on chart type and insights."""
        recs = []

        if 'Bar Chart' in chart_type:
            recs.append("Consider adding data labels for precise values")
            recs.append("Ensure consistent spacing between bars")
        elif 'Line Chart' in chart_type:
            recs.append("Add gridlines for better readability")
            recs.append("Consider trend lines for data analysis")
        elif 'Pie Chart' in chart_type:
            recs.append("Limit to 5-7 categories for clarity")
            recs.append("Consider using percentages on slices")

        # General recommendations
        recs.append("Verify axis labels and titles are clear")
        recs.append("Consider accessibility for color-blind users")

        return recs[:3]  # Limit recommendations
244
+
245
# Initialize analyzer
# Module-level singleton: ChartAnalyzer() loads (and may download) the BLIP
# weights, so this runs once at import time rather than per request.
print("🔄 Starting Chart Analyzer initialization...")
try:
    analyzer = ChartAnalyzer()
    print("✅ Chart Analyzer ready!")
except Exception as e:
    # Keep the module importable even when model loading fails; downstream
    # handlers check `analyzer is None` and report the failure to the user.
    print(f"❌ Failed to initialize: {e}")
    analyzer = None
253
+
254
def load_image_from_url(url):
    """Fetch an image over HTTP and return it with a status message.

    Args:
        url: image URL; blank/whitespace-only input is rejected up front.

    Returns:
        tuple: (PIL.Image or None, human-readable status string).
    """
    if not url.strip():
        return None, "Please enter a valid URL"

    # Browser-like UA: some hosts reject the default requests user agent.
    browser_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    }
    try:
        print(f"🌐 Loading image from: {url}")
        response = requests.get(url, timeout=15, headers=browser_headers)
        response.raise_for_status()

        picture = Image.open(io.BytesIO(response.content))
        width, height = picture.size
        print(f"✅ Image loaded: {picture.size} pixels")
        return picture, f"✅ Successfully loaded image ({width}×{height})"

    except Exception as e:
        print(f"❌ URL load error: {e}")
        return None, f"❌ Failed to load image: {str(e)}"
273
+
274
def analyze_chart_interface(image, analysis_type):
    """Gradio entry point: validate inputs, then delegate to the analyzer.

    Returns an error/prompt string when the analyzer failed to initialize or
    no image was supplied; otherwise the Markdown analysis report.
    """
    # Guard clauses first: the module-level analyzer is None when startup failed.
    if analyzer is None:
        return "❌ Analyzer not initialized. Please refresh the page."

    if image is None:
        return "📤 Please upload an image or load one from URL first."

    print(f"🔍 Starting {analysis_type} analysis...")
    report = analyzer.analyze_chart(image, analysis_type)
    print("✅ Analysis complete!")
    return report
286
 
287
def clear_interface():
    """Reset the image input, URL box, and results panel to their blank states."""
    cleared_image = None
    cleared_url = ""
    ready_message = "📤 Ready for new image analysis!"
    return cleared_image, cleared_url, ready_message
290
 
291
# Create Gradio Interface
print("🎨 Building user interface...")

# Blocks container with custom CSS: centered 1000px layout plus a gradient
# header banner styled via the .main-header class used in gr.HTML below.
interface = gr.Blocks(
    title="AI Chart Analyzer with BLIP",
    css="""
    .gradio-container {
        max-width: 1000px !important;
        margin: auto !important;
    }
    .main-header {
        text-align: center;
        background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
        color: white;
        padding: 20px;
        border-radius: 10px;
        margin-bottom: 20px;
    }
    """
)

with interface:
    # Header banner (styled by the .main-header CSS rule above).
    gr.HTML("""
    <div class="main-header">
        <h1>🤖 AI Chart Analyzer</h1>
        <p>Upload any chart or graph to get intelligent analysis powered by BLIP AI</p>
    </div>
    """)

    with gr.Row():
        # Left Column - Input
        with gr.Column(scale=1):
            gr.Markdown("### 📁 Upload Your Chart")

            # Image input options: direct file upload or URL fetch.
            with gr.Tab("📤 Upload File"):
                image_input = gr.Image(
                    label="Drop chart image here or click to browse",
                    type="pil"  # hand PIL Images straight to the analyzer
                )

            with gr.Tab("🌐 Load from URL"):
                url_input = gr.Textbox(
                    label="Image URL",
                    placeholder="https://example.com/chart.png"
                )
                load_btn = gr.Button("🔄 Load Image", variant="secondary")

            gr.Markdown("### ⚙️ Analysis Settings")
            # Depth choices must match the strings analyze_chart dispatches on.
            analysis_type = gr.Radio(
                choices=["basic", "detailed", "comprehensive"],
                value="detailed",
                label="Analysis Depth"
            )

            # Action buttons
            with gr.Row():
                analyze_btn = gr.Button("🚀 Analyze Chart", variant="primary")
                clear_btn = gr.Button("🧹 Clear All", variant="secondary")

        # Right Column - Output
        with gr.Column(scale=2):
            gr.Markdown("### 📊 Analysis Results")
            output = gr.Markdown(
                value="🎯 **Ready to analyze!** Upload a chart image and click 'Analyze Chart' to see AI-powered insights.",
                height=500
            )

    # Examples section — a synthetic solid-color image keeps examples
    # self-contained (no network fetch needed).
    gr.Markdown("### 🎯 Try These Example Charts")
    examples = gr.Examples(
        examples=[
            [Image.new('RGB', (400, 300), color='lightblue'), "detailed"],
        ],
        inputs=[image_input, analysis_type],
        label="Click examples to test the analyzer"
    )

    # Event handlers
    analyze_btn.click(
        fn=analyze_chart_interface,
        inputs=[image_input, analysis_type],
        outputs=output
    )

    # URL loader writes both the image component and the status message.
    load_btn.click(
        fn=load_image_from_url,
        inputs=[url_input],
        outputs=[image_input, output]
    )

    clear_btn.click(
        fn=clear_interface,
        outputs=[image_input, url_input, output]
    )
386
 
387
# Launch application
if __name__ == "__main__":
    print("🚀 Launching Chart Analyzer...")
    try:
        # share=True requests a public gradio.live tunnel; debug=True surfaces
        # errors in the console and keeps the process in the foreground.
        interface.launch(
            server_name="0.0.0.0",  # bind all interfaces (needed in containers/Spaces)
            server_port=7860,
            share=True,
            debug=True
        )
    except Exception as e:
        # Fall back to Gradio's defaults (e.g. when port 7860 is unavailable).
        print(f"❌ Launch error: {e}")
        print("🔄 Trying alternative launch...")
        interface.launch()