namuisam committed on
Commit
07f689d
·
verified ·
1 Parent(s): fdb5e18

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -17
app.py CHANGED
@@ -1,6 +1,5 @@
1
  import streamlit as st
2
  from transformers import pipeline
3
- import hashlib
4
 
5
  # Function definitions
6
  def img2text(url):
@@ -32,40 +31,41 @@ def main():
32
  uploaded_file = st.file_uploader("Select an Image...")
33
 
34
  if uploaded_file is not None:
35
- # Get file bytes and compute a hash
36
- bytes_data = uploaded_file.getvalue()
37
- file_hash = hashlib.sha256(bytes_data).hexdigest()
38
-
39
- # Reset session state only if the file content has changed
40
- if ("last_uploaded_hash" not in st.session_state) or (st.session_state.last_uploaded_hash != file_hash):
41
- st.session_state.scenario = None
42
- st.session_state.story = None
43
- st.session_state.audio_data = None
44
- st.session_state.last_uploaded_hash = file_hash
45
-
46
  # Save the uploaded file locally.
 
47
  with open(uploaded_file.name, "wb") as file:
48
  file.write(bytes_data)
49
  st.image(uploaded_file, caption="Uploaded Image", use_container_width=True)
50
-
 
 
 
 
 
 
 
 
51
  # Stage 1: Image to Text
52
  if st.session_state.scenario is None:
53
  st.text("Processing img2text...")
54
  st.session_state.scenario = img2text(uploaded_file.name)
55
  st.write(st.session_state.scenario)
56
-
57
  # Stage 2: Text to Story
58
  if st.session_state.story is None:
59
  st.text("Generating a story...")
60
  st.session_state.story = text2story(st.session_state.scenario)
61
  st.write(st.session_state.story)
62
-
63
  # Stage 3: Story to Audio data
64
  if st.session_state.audio_data is None:
65
  st.text("Generating audio data...")
66
  st.session_state.audio_data = text2audio(st.session_state.story)
67
-
68
- # Play Audio button – uses stored audio_data.
69
  if st.button("Play Audio"):
70
  st.audio(
71
  st.session_state.audio_data["audio"],
 
1
  import streamlit as st
2
  from transformers import pipeline
 
3
 
4
  # Function definitions
5
  def img2text(url):
 
31
  uploaded_file = st.file_uploader("Select an Image...")
32
 
33
  if uploaded_file is not None:
34
+ # Add a checkbox to allow forced regeneration.
35
+ force_regen = st.checkbox("Force regenerate story", value=False)
36
+
 
 
 
 
 
 
 
 
37
  # Save the uploaded file locally.
38
+ bytes_data = uploaded_file.getvalue()
39
  with open(uploaded_file.name, "wb") as file:
40
  file.write(bytes_data)
41
  st.image(uploaded_file, caption="Uploaded Image", use_container_width=True)
42
+
43
+ # Initialize/reset session state for this run.
44
+ if "scenario" not in st.session_state or force_regen:
45
+ st.session_state.scenario = None
46
+ if "story" not in st.session_state or force_regen:
47
+ st.session_state.story = None
48
+ if "audio_data" not in st.session_state or force_regen:
49
+ st.session_state.audio_data = None
50
+
51
  # Stage 1: Image to Text
52
  if st.session_state.scenario is None:
53
  st.text("Processing img2text...")
54
  st.session_state.scenario = img2text(uploaded_file.name)
55
  st.write(st.session_state.scenario)
56
+
57
  # Stage 2: Text to Story
58
  if st.session_state.story is None:
59
  st.text("Generating a story...")
60
  st.session_state.story = text2story(st.session_state.scenario)
61
  st.write(st.session_state.story)
62
+
63
  # Stage 3: Story to Audio data
64
  if st.session_state.audio_data is None:
65
  st.text("Generating audio data...")
66
  st.session_state.audio_data = text2audio(st.session_state.story)
67
+
68
+ # Play button – uses stored audio_data so the story won't be re-generated on every click.
69
  if st.button("Play Audio"):
70
  st.audio(
71
  st.session_state.audio_data["audio"],