muneebashraf commited on
Commit
e45e6f2
·
1 Parent(s): 4e130e4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -11
app.py CHANGED
@@ -1,20 +1,26 @@
1
  import gradio as gr
2
  import requests
3
- from transformers import pipeline
 
 
 
 
4
 
5
- # Create a Gradio interface
6
  def caption_image(input_image):
7
-
8
- caption_model_url = "Salesforce/blip-image-captioning-large"
9
- files = {"file": open(input_image.name, "rb")}
10
- response = requests.post(caption_model_url, files=files)
11
- caption = response.json()["caption"]
 
 
12
 
13
- # Use the Transformers pipeline for sentiment analysis
14
- sentiment_model = pipeline("sentiment-analysis")
15
- sentiment_score = sentiment_model(caption)[0]["label"]
 
16
 
17
- return f"Caption: {caption}\nSentiment: {sentiment_score}"
18
 
19
  # Define the Gradio input interface
20
  inputs = gr.inputs.Image()
 
1
  import gradio as gr
2
  import requests
3
+ from PIL import Image
4
+ from transformers import BlipProcessor, BlipForConditionalGeneration
5
+
6
+ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
7
+ model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
8
 
 
9
def caption_image(input_image):
    """Generate both a conditional and an unconditional BLIP caption for an image.

    Args:
        input_image: The image handed over by the Gradio ``Image`` component.
            May be a filepath string, an already-open PIL image, or a numpy
            array — legacy ``gr.inputs.Image()`` defaults to type="numpy",
            so calling ``Image.open`` on it directly would crash
            (TODO confirm the component's configured type against the caller).

    Returns:
        A single string with the conditional caption on the first line and
        the unconditional caption on the second.
    """
    # Normalize whatever Gradio delivers into an RGB PIL image.
    if isinstance(input_image, Image.Image):
        raw_image = input_image.convert('RGB')
    elif isinstance(input_image, (str, bytes)):
        raw_image = Image.open(input_image).convert('RGB')
    else:
        # numpy array (Gradio's default image type) or other array-like.
        raw_image = Image.fromarray(input_image).convert('RGB')

    # Conditional captioning: the model completes the supplied text prompt.
    prompt = "a photography of"
    model_inputs = processor(raw_image, prompt, return_tensors="pt")
    out = model.generate(**model_inputs)
    caption_conditional = processor.decode(out[0], skip_special_tokens=True)

    # Unconditional captioning: no prompt, the model describes the image freely.
    # Renamed local from `inputs` to `model_inputs` to avoid shadowing the
    # module-level Gradio `inputs` variable defined below the function.
    model_inputs = processor(raw_image, return_tensors="pt")
    out = model.generate(**model_inputs)
    caption_unconditional = processor.decode(out[0], skip_special_tokens=True)

    return f"Conditional Caption: {caption_conditional}\nUnconditional Caption: {caption_unconditional}"
24
 
25
  # Define the Gradio input interface
26
  inputs = gr.inputs.Image()