Pravalika56 committed on
Commit
68577fd
·
verified ·
1 Parent(s): 4b01040

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -7
app.py CHANGED
@@ -1,11 +1,26 @@
 
1
from transformers import BlipProcessor, BlipForConditionalGeneration
from PIL import Image

# One-shot demo: caption a local image with the BLIP base captioning model.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

# Load the demo image and run a single generation pass on CPU tensors ("pt").
image = Image.open("skydive.jpg")
inputs = processor(image, return_tensors="pt")
outputs = model.generate(**inputs)

# Decode the generated token ids back into a human-readable caption.
caption = processor.decode(outputs[0], skip_special_tokens=True)
print(caption)
 
from functools import lru_cache

import gradio as gr
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
 
5
 
6
@lru_cache(maxsize=1)
def _load_blip():
    """Load the BLIP captioning processor and model once and reuse them.

    Fetching/constructing the model is expensive; without caching, the
    original code rebuilt both on EVERY Gradio request.
    """
    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
    return processor, model


def generate_caption(input_image):
    """Generate a natural-language caption for an image.

    Parameters
    ----------
    input_image : str | file-like | PIL.Image.Image
        A path or file-like object accepted by ``PIL.Image.open``, or an
        already-open PIL image (e.g. from ``gr.Image(type="pil")``).

    Returns
    -------
    str
        The decoded caption with special tokens stripped.
    """
    processor, model = _load_blip()

    # Accept an already-open PIL image as well as a path/file-like object.
    if isinstance(input_image, Image.Image):
        image = input_image
    else:
        image = Image.open(input_image)

    inputs = processor(image, return_tensors="pt")
    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        outputs = model.generate(**inputs)

    caption = processor.decode(outputs[0], skip_special_tokens=True)
    return caption
16
+
17
# Build the web UI. `gr.Image(type="filepath")` hands the upload to
# `generate_caption` as a temporary file path, which `PIL.Image.open` accepts.
# The previous `gr.inputs.Image()` is deprecated since Gradio 3.0 and removed
# in Gradio 4 (and its default type="numpy" passed an ndarray that
# `Image.open` cannot read).
iface = gr.Interface(
    fn=generate_caption,
    inputs=gr.Image(type="filepath"),
    outputs="text",
    title="Image Captioning",
    description="Upload an image to generate a caption.",
)

iface.launch()