# Assignment1 / app.py
# (Hugging Face Spaces page chrome removed; commit 5f21a2d "Update app.py" by CR7CAD, 5.9 kB)
# import part
import os
import tempfile

import numpy as np
import streamlit as st
import torch
from transformers import pipeline
# function part
# img2text
def img2text(image_path):
    """Generate a one-sentence caption for the image stored at *image_path*.

    Loads a BLIP image-captioning pipeline and returns the generated
    caption string for the first (only) result.
    """
    captioner = pipeline("image-to-text", model="sooh-j/blip-image-captioning-base")
    result = captioner(image_path)
    return result[0]["generated_text"]
# text2story
def text2story(text):
    """Expand an image caption into a short children's story.

    The story is capped at 100 words; generation is sampled
    (temperature/top-k/top-p) so repeated calls produce different stories.
    """
    # Small chat model keeps generation tolerable on CPU.
    story_pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0")

    prompt = f"Write a fun children's story based on this: {text}. Once upon a time, "

    outputs = story_pipe(
        prompt,
        max_length=150,
        num_return_sequences=1,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        do_sample=True,
    )

    # Strip the instruction prompt, keeping the "Once upon a time, " lead-in.
    story = outputs[0]['generated_text'].replace(prompt, "Once upon a time, ")

    # Cap the story at 100 words (NOTE: the model may also produce fewer).
    tokens = story.split()
    if len(tokens) > 100:
        story = " ".join(tokens[:100])
    return story
# text2audio - REVISED to correctly handle the audio output
def text2audio(story_text):
try:
# Use a different TTS model that works reliably with pipeline
synthesizer = pipeline("text-to-speech", model="microsoft/speecht5_tts")
# Additional input required for this model
speaker_embeddings = pipeline(
"audio-classification",
model="microsoft/speecht5_speaker_embeddings"
)("some_audio_file.mp3")["logits"]
# Limit text length to avoid timeouts
max_chars = 500
if len(story_text) > max_chars:
last_period = story_text[:max_chars].rfind('.')
if last_period > 0:
story_text = story_text[:last_period + 1]
else:
story_text = story_text[:max_chars]
# Generate speech with correct parameters
speech = synthesizer(
text=story_text,
forward_params={"speaker_embeddings": speaker_embeddings}
)
# Create a temporary WAV file
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.wav')
temp_filename = temp_file.name
temp_file.close()
# Display the structure of the speech output for debugging
st.write(f"Speech output keys: {speech.keys()}")
# Save the audio data to the temporary file
# Different models have different output formats, we'll try common keys
if 'audio' in speech:
# Convert numpy array to WAV file
try:
import scipy.io.wavfile as wavfile
wavfile.write(temp_filename, speech['sampling_rate'], speech['audio'])
except ImportError:
# If scipy is not available, try raw writing
with open(temp_filename, 'wb') as f:
# Convert numpy array to bytes in a simple way
if isinstance(speech['audio'], np.ndarray):
audio_bytes = speech['audio'].tobytes()
f.write(audio_bytes)
else:
f.write(speech['audio'])
elif 'numpy_array' in speech:
with open(temp_filename, 'wb') as f:
f.write(speech['numpy_array'].tobytes())
else:
# Fallback: try to write whatever is available
with open(temp_filename, 'wb') as f:
# Just write the first value that seems like it could be audio data
for key, value in speech.items():
if isinstance(value, (bytes, bytearray)) or (
isinstance(value, np.ndarray) and value.size > 1000):
if isinstance(value, np.ndarray):
f.write(value.tobytes())
else:
f.write(value)
break
return temp_filename
except Exception as e:
st.error(f"Error generating audio: {str(e)}")
# Print all available keys for debugging
return None
# Function to save temporary image file
def save_uploaded_image(uploaded_file):
    """Persist a Streamlit upload under ./temp and return the file path.

    Creates the "temp" directory on first use; the file keeps the
    upload's original name.
    """
    target_dir = "temp"
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    destination = os.path.join(target_dir, uploaded_file.name)
    with open(destination, "wb") as out:
        out.write(uploaded_file.getvalue())
    return destination
# main part
st.set_page_config(page_title="Your Image to Audio Story", page_icon="🦜")
st.header("Turn Your Image to Audio Story")

uploaded_file = st.file_uploader("Select an Image...")

if uploaded_file is not None:
    # Display the uploaded image
    st.image(uploaded_file, caption="Uploaded Image", use_container_width=True)

    # Save the image to disk so the captioning pipeline can read it by path
    image_path = save_uploaded_image(uploaded_file)

    # Stage 1: Image to Text
    st.text('Processing img2text...')
    caption = img2text(image_path)
    st.write(caption)

    # Stage 2: Text to Story
    st.text('Generating a story...')
    story = text2story(caption)
    st.write(story)

    # Stage 3: Story to Audio data
    st.text('Generating audio data...')
    audio_file = text2audio(story)

    # Play button.
    # NOTE(review): clicking any button reruns this whole script, which
    # regenerates caption/story/audio from scratch; caching results in
    # st.session_state would avoid the recompute — confirm desired UX.
    if st.button("Play Audio"):
        if audio_file and os.path.exists(audio_file):
            st.audio(audio_file)
        else:
            st.error("Audio generation failed. Please try again.")

    # Clean up the temporary image; the audio file is kept because it may
    # still be needed by the player on a rerun.
    try:
        os.remove(image_path)
    except OSError:
        # Best-effort cleanup only — a missing/locked file is not an error
        # worth surfacing to the user.
        pass