saakshigupta committed on
Commit
c861c68
·
verified ·
1 Parent(s): 2f6de0f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -28
app.py CHANGED
@@ -220,16 +220,27 @@ with st.sidebar:
220
  if not st.session_state.blip_model_loaded:
221
  if st.button("📥 Load BLIP Models", type="primary"):
222
  # Load BLIP models
223
- original_processor, original_model, finetuned_processor, finetuned_model = load_blip_models()
224
- if all([original_processor, original_model, finetuned_processor, finetuned_model]):
225
- st.session_state.original_processor = original_processor
226
- st.session_state.original_model = original_model
227
- st.session_state.finetuned_processor = finetuned_processor
228
- st.session_state.finetuned_model = finetuned_model
229
- st.session_state.blip_model_loaded = True
230
- st.success("✅ BLIP models loaded!")
231
- else:
232
- st.error("❌ Failed to load BLIP models.")
 
 
 
 
 
 
 
 
 
 
 
233
  else:
234
  st.success("✅ BLIP models loaded")
235
 
@@ -399,24 +410,6 @@ def load_detection_model_xception():
399
 
400
  # ----- BLIP Image Captioning -----
401
 
402
- # Function to load BLIP captioning models
403
- @st.cache_resource
404
- def load_blip_models():
405
- with st.spinner("Loading BLIP captioning models..."):
406
- try:
407
- # Load original BLIP model for general image captioning
408
- original_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
409
- original_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
410
-
411
- # Load fine-tuned BLIP model for GradCAM analysis
412
- finetuned_processor = BlipProcessor.from_pretrained("saakshigupta/gradcam-xception-finetuned")
413
- finetuned_model = BlipForConditionalGeneration.from_pretrained("saakshigupta/gradcam-xception-finetuned")
414
-
415
- return original_processor, original_model, finetuned_processor, finetuned_model
416
- except Exception as e:
417
- st.error(f"Error loading BLIP models: {str(e)}")
418
- return None, None, None, None
419
-
420
  # Function to generate image caption using BLIP's VQA approach for GradCAM
421
  def generate_gradcam_caption(image, processor, model, max_length=60):
422
  """
 
220
  if not st.session_state.blip_model_loaded:
221
  if st.button("📥 Load BLIP Models", type="primary"):
222
  # Load BLIP models
223
+ try:
224
+ with st.spinner("Loading BLIP captioning models..."):
225
+ # Load original BLIP model for general image captioning
226
+ original_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
227
+ original_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
228
+
229
+ # Load fine-tuned BLIP model for GradCAM analysis
230
+ finetuned_processor = BlipProcessor.from_pretrained("saakshigupta/gradcam-xception-finetuned")
231
+ finetuned_model = BlipForConditionalGeneration.from_pretrained("saakshigupta/gradcam-xception-finetuned")
232
+
233
+ if all([original_processor, original_model, finetuned_processor, finetuned_model]):
234
+ st.session_state.original_processor = original_processor
235
+ st.session_state.original_model = original_model
236
+ st.session_state.finetuned_processor = finetuned_processor
237
+ st.session_state.finetuned_model = finetuned_model
238
+ st.session_state.blip_model_loaded = True
239
+ st.success("✅ BLIP models loaded!")
240
+ else:
241
+ st.error("❌ Failed to load BLIP models.")
242
+ except Exception as e:
243
+ st.error(f"Error loading BLIP models: {str(e)}")
244
  else:
245
  st.success("✅ BLIP models loaded")
246
 
 
410
 
411
  # ----- BLIP Image Captioning -----
412
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
413
  # Function to generate image caption using BLIP's VQA approach for GradCAM
414
  def generate_gradcam_caption(image, processor, model, max_length=60):
415
  """