Upload image_captioning.py
image_captioning.py +24 -0
image_captioning.py
ADDED
@@ -0,0 +1,24 @@
+import requests
+from PIL import Image
+from transformers import BlipProcessor, BlipForConditionalGeneration
+
+# Load the models and processors
+processor_base = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+
+processor_large = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
+
+# Define the function for unconditional image captioning
+def caption_image_unconditional(image):
+    inputs = processor_base(image, return_tensors="pt")
+    outputs = model_base.generate(**inputs)
+    caption = processor_base.decode(outputs[0], skip_special_tokens=True)
+    return caption
+
+# Define the function for conditional image captioning
+def caption_image_conditional(image, text):
+    inputs = processor_large(image, text, return_tensors="pt")
+    outputs = model_large.generate(**inputs)
+    caption = processor_large.decode(outputs[0], skip_special_tokens=True)
+    return caption
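
The otherwise unused `requests` import suggests the file is meant to be driven by images fetched over HTTP. Below is a minimal usage sketch, assuming image_captioning.py is importable as a module; the demo image URL (from the BLIP model card) and the prompt text are illustrative assumptions, not part of this commit.

# Usage sketch (illustrative; not part of the uploaded file).
import requests
from PIL import Image

# Assumes image_captioning.py is on the Python path; importing it
# loads both models and processors, which can take a while.
from image_captioning import caption_image_conditional, caption_image_unconditional

# Demo image from the BLIP model card (assumed URL); any RGB image works.
url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

# Unconditional captioning: the base model describes the image freely.
print(caption_image_unconditional(image))

# Conditional captioning: the large model continues the given prompt.
print(caption_image_conditional(image, "a photography of"))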