File size: 3,551 Bytes
efe4c0f
90bef38
8d5fabf
ab8ead3
118cd25
ad4186a
ab8ead3
ad4186a
 
cd245d5
8d5fabf
efe4c0f
5f21a2d
ad4186a
7c4bc18
5f21a2d
7c4bc18
5f21a2d
 
7c4bc18
5f21a2d
 
 
 
 
 
 
7c4bc18
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5f21a2d
 
efe4c0f
cd245d5
efe4c0f
 
ad4186a
8d5fabf
ad4186a
 
 
4e37056
ab8ead3
ad4186a
 
f006a50
ad4186a
ab8ead3
f006a50
ad4186a
efe4c0f
 
ad4186a
 
 
efe4c0f
 
ad4186a
 
 
efe4c0f
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
# Imports: Streamlit for the UI, Hugging Face pipelines for the models, PIL for image handling
import streamlit as st
from transformers import pipeline
from PIL import Image

# Simple image-to-text function
def img2text(image):
    """Generate a caption for an image with a BLIP captioning model.

    Args:
        image: A PIL image (or any input accepted by the HF image-to-text
            pipeline).

    Returns:
        The generated caption string.
    """
    # Cache the pipeline on the function itself: model download/initialization
    # dominates runtime, and the original rebuilt it on every call.
    if getattr(img2text, "_pipeline", None) is None:
        img2text._pipeline = pipeline(
            "image-to-text", model="sooh-j/blip-image-captioning-base"
        )
    return img2text._pipeline(image)[0]["generated_text"]

# Simple text-to-story function
def text2story(text):
    """Generate a short children's story seeded by *text* (an image caption).

    Args:
        text: Seed text to base the story on.

    Returns:
        The generated story, starting with "Once upon a time, " and trimmed
        at a natural sentence boundary when one is found.
    """
    # Cache the generation pipeline on the function: loading the model is far
    # more expensive than generating, and the original reloaded it per call.
    if getattr(text2story, "_generator", None) is None:
        text2story._generator = pipeline(
            "text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0"
        )
    prompt = f"Write a short children's story based on this: {text}. The story should have a clear beginning, middle, and end. Keep it under 150 words. Once upon a time, "

    # Generate a longer text to ensure we get a complete story
    story_result = text2story._generator(
        prompt,
        max_length=300,
        num_return_sequences=1,
        temperature=0.7,
        do_sample=True,
    )

    # Strip the instruction prompt but keep the story opener.
    story_text = story_result[0]['generated_text'].replace(prompt, "Once upon a time, ")
    return _trim_to_sentence_end(story_text)


def _trim_to_sentence_end(story_text, min_story_length=100):
    """Trim *story_text* at a natural sentence ending ('.', '?' or '!').

    Prefers the third sentence ending at or past *min_story_length* for a
    more complete story, falls back to the last suitable ending, and returns
    the text unchanged when no suitable ending exists.
    """
    # Single pass over the text instead of three separate enumerate() scans.
    endings = [i for i, char in enumerate(story_text) if char in ".?!"]
    suitable = [i for i in endings if i >= min_story_length]
    if not suitable:
        # No good ending found: return as-is.
        return story_text
    # Third sentence ending (or later) when available; otherwise the last one.
    cut = suitable[2] if len(suitable) > 2 else suitable[-1]
    return story_text[:cut + 1]

# Simple text-to-audio function
def text2audio(story_text):
    """Synthesize speech for *story_text*.

    Args:
        story_text: The text to be spoken.

    Returns:
        The raw TTS pipeline output — presumably a dict with audio samples
        and a sampling rate; exact keys depend on the model (the caller
        probes for 'audio' / 'audio_array' / 'sampling_rate').
    """
    # Cache the TTS pipeline on the function so the model loads only once
    # instead of on every synthesis request.
    if getattr(text2audio, "_synthesizer", None) is None:
        text2audio._synthesizer = pipeline(
            "text-to-speech", model="HelpingAI/HelpingAI-TTS-v1"
        )
    return text2audio._synthesizer(story_text)

# Basic Streamlit interface: upload an image, caption it, expand the caption
# into a story, then synthesize and play the story as audio.
st.title("Image to Audio Story")
uploaded_file = st.file_uploader("Upload an image")

if uploaded_file is not None:
    # Display image
    st.image(uploaded_file, caption="Uploaded Image")

    # Convert to PIL Image
    image = Image.open(uploaded_file)

    # Image to Text
    st.write("Generating caption...")
    caption = img2text(image)
    st.write(f"Caption: {caption}")

    # Text to Story
    st.write("Creating story...")
    story = text2story(caption)
    st.write(f"Story: {story}")

    # Text to Audio
    st.write("Generating audio...")
    speech_output = text2audio(story)

    # Play audio. NOTE(review): the output schema of the TTS pipeline is not
    # visible here — the code probes for 'audio' then 'audio_array' alongside
    # 'sampling_rate'; confirm against the actual model's return value.
    try:
        if 'audio' in speech_output and 'sampling_rate' in speech_output:
            st.audio(speech_output['audio'], sample_rate=speech_output['sampling_rate'])
        elif 'audio_array' in speech_output and 'sampling_rate' in speech_output:
            st.audio(speech_output['audio_array'], sample_rate=speech_output['sampling_rate'])
        else:
            # Unknown payload shape: report rather than crash.
            st.write("Audio generated but could not be played.")
    except Exception as e:
        # Surface playback failures in the UI instead of killing the app.
        st.error(f"Error playing audio: {e}")