# app.py — Streamlit demo: image captioning + story generation
# (Hugging Face Space "sample" by ywanguj, commit 116f3d3)
# Import Part
import streamlit as st
import time
from PIL import Image
from transformers import pipeline
# Function Part
@st.cache_resource
def _load_caption_pipeline():
    # Loading the BLIP model is expensive; st.cache_resource keeps one shared
    # instance alive across Streamlit reruns instead of rebuilding it per call.
    return pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")


def generate_image_caption(image_path):
    """Generate a caption for an image.

    Args:
        image_path: Input accepted by the transformers image-to-text
            pipeline — a file path/URL or a PIL.Image (the caller in this
            app passes a PIL.Image despite the parameter name).

    Returns:
        str: The generated caption text.
    """
    img2caption = _load_caption_pipeline()
    result = img2caption(image_path)
    return result[0]['generated_text']
@st.cache_resource
def _load_story_pipeline():
    # Loading the story-generation model is expensive; cache one shared
    # instance across Streamlit reruns instead of rebuilding it per call.
    return pipeline("text-generation", model="pranavpsv/genre-story-generator-v2")


def text2story(text):
    """Generate a short story continuing from the given prompt text.

    Args:
        text: Prompt text (here, the image caption).

    Returns:
        str: The generated story text (includes the prompt, as returned by
        the text-generation pipeline).
    """
    # Use a distinct local name: the original code assigned the pipeline to a
    # variable named `text2story`, shadowing this function inside its own body.
    story_generator = _load_story_pipeline()
    generated_story = story_generator(text)
    return generated_story[0]['generated_text']
# Main Part
# App title and intro text
st.title("Assignment")
st.write("Welcome to a demo app showcasing basic Streamlit components")

# Image upload — the caption/story flow only starts once a file is provided.
uploaded_image = st.file_uploader(
    "Upload an image",
    type=['jpg', 'jpeg', 'png'],
    accept_multiple_files=False,
    key="file_uploader",
)

if uploaded_image is not None:
    with st.spinner("Loading image..."):
        time.sleep(1)  # brief artificial delay so the spinner is visible
        image = Image.open(uploaded_image)
    # Show the uploaded picture (previously commented out, leaving the
    # "Loading image..." spinner with nothing to show for it).
    st.image(image, caption='Uploaded Image', use_column_width=True)

    # Generate and display a caption for the image.
    caption = generate_image_caption(image)
    st.write("Generated Caption:", caption)

    # On demand, expand the caption into a short generated story.
    if st.button("Generate Story"):
        with st.spinner("Generating story..."):
            generated_story = text2story(caption)
            st.write("Generated Story:", generated_story)