mkoot007 committed on
Commit
5b4361c
·
1 Parent(s): 9e6a3e4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -6
app.py CHANGED
@@ -1,9 +1,12 @@
import streamlit as st
from PIL import Image
from transformers import pipeline

# Initialize the image captioning model.
# NOTE(review): "image-captioning" is not a registered transformers pipeline
# task and raises a KeyError at startup; the captioning task is named
# "image-to-text".
image_captioning_model = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")

# Streamlit app title and description
st.title("Image Information Extractor")
@@ -21,12 +24,14 @@ if image:
st.write("Extracting information from the image...")

# Use the image captioning model to generate a description.
# NOTE(review): in the full app this runs inside the `if image:` branch of
# the upload widget (that context is not shown in this hunk) — confirm.
captions = image_captioning_model(image)

# Display the image description
st.write("Image Description:")
for caption in captions:
    st.write(caption)

# You can add more processing or explanations here if needed
 
 
import streamlit as st
from PIL import Image
# NOTE(review): transformers has no `AutoModelForImageCaptioning` class — the
# original import fails at startup. BLIP captioning is loaded with
# BlipProcessor (image preprocessing + token decoding) and
# BlipForConditionalGeneration (the caption model).
from transformers import BlipProcessor, BlipForConditionalGeneration

# Load the image captioning model once at module import.
model_name = "Salesforce/blip-image-captioning-large"
# The processor replaces both the feature extractor and the tokenizer. The
# name `tokenizer` is kept so downstream code that calls
# `tokenizer(image, return_tensors="pt")` and `tokenizer.decode(...)`
# continues to work unchanged.
tokenizer = BlipProcessor.from_pretrained(model_name)
model = BlipForConditionalGeneration.from_pretrained(model_name)

# Streamlit app title and description
st.title("Image Information Extractor")
 
24
  st.write("Extracting information from the image...")
25
 
26
  # Use the image captioning model to generate a description
27
+ inputs = tokenizer(image, return_tensors="pt")
28
+ with st.spinner("Generating caption..."):
29
+ captions = model.generate(**inputs)
30
+ caption = tokenizer.decode(captions[0], skip_special_tokens=True)
31
 
32
  # Display the image description
33
  st.write("Image Description:")
34
+ st.write(caption)
 
35
 
36
  # You can add more processing or explanations here if needed
37