Thea231 committed on
Commit
f4f89c9
·
verified ·
1 Parent(s): 9ea74b7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +69 -8
app.py CHANGED
@@ -1,15 +1,76 @@
1
  from transformers import pipeline
2
  import streamlit as st
3
  from PIL import Image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
- pipe_caption = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
6
 
7
- st.title("AI story telling for kids") # Establish title
8
 
9
- uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])
10
 
11
- if uploaded_file is not None: # Check if a file is uploaded
12
- image = Image.open(uploaded_file)
13
- st.image(image, caption="Uploaded Image", use_column_width=True)
14
- else:
15
- st.warning("Please upload an image.")
 
1
  from transformers import pipeline
2
  import streamlit as st
3
  from PIL import Image
4
+ from transformers import pipeline
5
+ from IPython.display import Audio
6
+
7
+
8
+ # img2text
9
+ def img2text(url):
10
+ image_to_text_model = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
11
+ text = image_to_text_model(url)[0]["generated_text"]
12
+ return text
13
+
14
+ # text2story
15
+ def text2story(text):
16
+ text_to_story_model = = pipeline("text-generation",model="distilbert/distilgpt2")
17
+ story_text = = text_to_story_model(text,min_length=100,num_return_sequences=1)
18
+ return story_text
19
+
20
+ # text2audio
21
+ def text2audio(story_text):
22
+ text_to_audio_model = pipeline("text-to-speech", model="facebook/mms-tts-eng")
23
+ audio_data = text_to_audio_model(story_text)
24
+ #audio_data = Audio(speech_output['audio'], rate=speech_output['sampling_rate'])
25
+ return audio_data
26
+
27
+
28
+ #main part
29
+ st.set_page_config(page_title="Your Image to Audio Story",
30
+ page_icon="🦜")
31
+ st.header("Turn Your Image to Story")
32
+ uploaded_file = st.file_uploader("Select an Image...")
33
+
34
+ if uploaded_file is not None:
35
+ print(uploaded_file)
36
+ bytes_data = uploaded_file.getvalue()
37
+ with open(uploaded_file.name, "wb") as file:
38
+ file.write(bytes_data)
39
+ st.image(uploaded_file, caption="Uploaded Image",
40
+ use_column_width=True)
41
+
42
+ #Stage 1: Image to Text
43
+ st.text('Processing img2text...')
44
+ scenario = img2text(uploaded_file.name)
45
+ st.write(scenario)
46
+
47
+ #Stage 2: Text to Story
48
+ st.text('Generating a story...')
49
+ story = text2story(scenario)
50
+ st.write(story)
51
+
52
+ Stage 3: Story to Audio data
53
+ st.text('Generating audio data...')
54
+ audio_data =text2audio(story)
55
+
56
+ # Play button
57
+ if st.button("Play Audio"):
58
+ st.audio(audio_data['audio'],
59
+ format="audio/wav",
60
+ start_time=0,
61
+ sample_rate = audio_data['sampling_rate'])
62
+ st.audio("kids_playing_audio.wav")
63
+
64
+
65
 
66
+ #pipe_caption = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
67
 
68
+ #st.title("AI story telling for kids") # Establish title
69
 
70
+ #uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])
71
 
72
+ #if uploaded_file is not None: # Check if a file is uploaded
73
+ # image = Image.open(uploaded_file)
74
+ # st.image(image, caption="Uploaded Image", use_column_width=True)
75
+ #else:
76
+ # st.warning("Please upload an image.")