Update app.py
app.py
CHANGED
@@ -1,86 +1,86 @@
-# app.py
-
 import streamlit as st
 from PIL import Image
-from transformers import BlipProcessor, BlipForConditionalGeneration, AutoTokenizer, AutoModelForCausalLM
 import torch
-import
+from transformers import BlipProcessor, BlipForConditionalGeneration
+from transformers import pipeline
 import io
 
-
+st.set_page_config(page_title="Image Storytelling App", layout="centered")
+
+# Title
+st.title("🖼️→📖→🗣️ Image Storytelling for Children")
 
+# Load models (with caching)
 @st.cache_resource
 def load_caption_model():
     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
     model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
     return processor, model
 
-def generate_caption(image):
-    processor, model = load_caption_model()
-    inputs = processor(images=image, return_tensors="pt")
-    out = model.generate(**inputs)
-    return processor.decode(out[0], skip_special_tokens=True)
-
-# ----------- Stage 2: Description to Story -----------
-
 @st.cache_resource
 def load_story_model():
-    …
-    )
-    …
+    return pipeline("text-generation", model="cahya/gpt2-small-indonesian-522M", device=0 if torch.cuda.is_available() else -1)
+
+@st.cache_resource
+def load_tts_model():
+    from TTS.api import TTS
+    return TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC", progress_bar=False, gpu=torch.cuda.is_available())
+
+# Step 1: Generate caption
+def generate_caption(image):
+    processor, model = load_caption_model()
+    try:
+        inputs = processor(images=[image], return_tensors="pt")  # 🔧 fix: wrap in list
+        out = model.generate(**inputs)
+        return processor.decode(out[0], skip_special_tokens=True)
+    except Exception as e:
+        st.error(f"Image captioning failed: {e}")
+        return None
+
+# Step 2: Generate story from caption
+def generate_story(caption):
+    story_model = load_story_model()
+    prompt = f"Write a short story of 50 to 100 words for children about: {caption}"
+    outputs = story_model(prompt, max_new_tokens=120, do_sample=True, temperature=0.85)
+    return outputs[0]["generated_text"].strip()
+
+# Step 3: Convert story to speech
+def generate_audio(story):
+    tts = load_tts_model()
+    try:
+        audio_array = tts.tts(story)
+        byte_io = io.BytesIO()
+        tts.save_wav(audio_array, byte_io)
+        byte_io.seek(0)
+        return byte_io.read()
+    except Exception as e:
+        st.error(f"Audio generation failed: {e}")
+        return None
+
+# App UI
+uploaded_file = st.file_uploader("Upload an image (illustration or drawing)", type=["jpg", "jpeg", "png"])
+
+if uploaded_file:
+    image = Image.open(uploaded_file).convert("RGB")
     st.image(image, caption="Uploaded Image", use_column_width=True)
 
-    # Stage 1
     with st.spinner("Generating description..."):
-    …
+        caption = generate_caption(image)
+
+    if caption:
+        st.subheader("📝 Description")
+        st.info(caption)
+
+        with st.spinner("Creating story..."):
+            story = generate_story(caption)
+
+        if story:
+            st.subheader("📖 Story")
+            st.write(story)
+
+            with st.spinner("Generating voice..."):
+                audio = generate_audio(story)
+
+            if audio:
+                st.subheader("🔊 Listen to the Story")
+                st.audio(audio, format="audio/wav")
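
For reference, the three stages this commit wires together can also be exercised outside Streamlit. The sketch below is not part of the commit: it reuses the same model checkpoints as app.py, assumes torch, transformers, pillow, and TTS are installed, assumes a local photo.jpg exists, and writes the audio with tts_to_file instead of the app's in-memory save_wav path.

# smoke_test.py — minimal, hypothetical driver for the caption -> story -> speech pipeline
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration, pipeline
from TTS.api import TTS

# Stage 1: image -> caption (BLIP)
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
blip = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.open("photo.jpg").convert("RGB")  # assumed sample image
inputs = processor(images=[image], return_tensors="pt")
caption = processor.decode(blip.generate(**inputs)[0], skip_special_tokens=True)

# Stage 2: caption -> short story (same checkpoint the app loads)
storyteller = pipeline("text-generation", model="cahya/gpt2-small-indonesian-522M",
                       device=0 if torch.cuda.is_available() else -1)
prompt = f"Write a short story of 50 to 100 words for children about: {caption}"
story = storyteller(prompt, max_new_tokens=120, do_sample=True, temperature=0.85)[0]["generated_text"]

# Stage 3: story -> speech; tts_to_file avoids the BytesIO handling in app.py
tts = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC", progress_bar=False,
          gpu=torch.cuda.is_available())
tts.tts_to_file(text=story, file_path="story.wav")
print(caption, story, sep="\n---\n")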
|