Cheselle committed on
Commit
ab3a26b
·
verified ·
1 Parent(s): 831b696

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -7
app.py CHANGED
@@ -1,15 +1,22 @@
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
import streamlit as st

# Load the BLIP captioning model and its preprocessor once at app startup
# (Streamlit re-runs the script on interaction; from_pretrained caches on disk).
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

file_name = st.file_uploader("Upload")

if file_name is not None:
    # Convert the uploaded image to RGB and preprocess it into model tensors.
    inputs = processor(Image.open(file_name).convert('RGB'), return_tensors="pt")

    out = model.generate(**inputs)
    # FIX: `print` goes to the server console, invisible to the web user;
    # st.write renders the generated caption in the Streamlit page.
    st.write(processor.decode(out[0], skip_special_tokens=True))
 
 
 
 
 
 
 
 
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

# Load the BLIP image-captioning model and its preprocessor once at startup.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
# FIX: requests has no default timeout, so a stalled connection would hang
# the script forever; bound the wait explicitly.
raw_image = Image.open(requests.get(img_url, stream=True, timeout=30).raw).convert('RGB')

# conditional image captioning: the model continues the supplied text prompt
text = "a photography of"
inputs = processor(raw_image, text, return_tensors="pt")

out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))

# unconditional image captioning: caption generated from the image alone
inputs = processor(raw_image, return_tensors="pt")

out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))