gowshiselva committed on
Commit
90a72f2
·
verified ·
1 Parent(s): 1a93d9f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -35,7 +35,6 @@ def generate_advanced_description(image, detail_level, emotion_focus, style_focu
35
  return "Please upload an image to generate a description."
36
 
37
  # Process image for both models
38
- blip2_inputs = blip2_processor(images=image, return_tensors="pt").to(device)
39
  blip_inputs = blip_processor(images=image, return_tensors="pt").to(device)
40
 
41
  # Basic prompts for different aspects
@@ -54,10 +53,13 @@ def generate_advanced_description(image, detail_level, emotion_focus, style_focu
54
  basic_caption = blip_processor.decode(basic_outputs[0], skip_special_tokens=True)
55
 
56
  # Get detailed description from BLIP-2
 
 
 
 
57
  outputs = blip2_model.generate(
58
  **blip2_inputs,
59
  max_length=150 + (detail_level * 50),
60
- prompt=combined_prompt,
61
  num_beams=5,
62
  min_length=50,
63
  top_p=0.9,
 
35
  return "Please upload an image to generate a description."
36
 
37
  # Process image for both models
 
38
  blip_inputs = blip_processor(images=image, return_tensors="pt").to(device)
39
 
40
  # Basic prompts for different aspects
 
53
  basic_caption = blip_processor.decode(basic_outputs[0], skip_special_tokens=True)
54
 
55
  # Get detailed description from BLIP-2
56
+ # BLIP-2 requires text input to be processed with the image
57
+ text = "a detailed description: " + combined_prompt
58
+ blip2_inputs = blip2_processor(image, text=text, return_tensors="pt").to(device)
59
+
60
  outputs = blip2_model.generate(
61
  **blip2_inputs,
62
  max_length=150 + (detail_level * 50),
 
63
  num_beams=5,
64
  min_length=50,
65
  top_p=0.9,