Thea231 committed on
Commit
f024bd3
·
verified ·
1 Parent(s): f83bdad

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -8
app.py CHANGED
@@ -1,7 +1,5 @@
1
  import streamlit as st
2
  from transformers import pipeline
3
- import torch
4
- from scipy.io.wavfile import write
5
  import numpy as np
6
 
7
  # Load Hugging Face pipelines
@@ -32,15 +30,19 @@ if st.button("Analyze Comment"):
32
  feedback = feedback_generator(f"emotion: {emotion_label} text: {comment_input}", max_length=50)[0]["generated_text"]
33
 
34
  # Convert feedback text to speech
35
- audio_output = text_to_audio(feedback)
36
- audio_array = np.array(audio_output["audio"]) * 32767 # Convert to int16 range
37
- audio_path = "feedback_audio.wav"
38
- write(audio_path, 22050, audio_array.astype(np.int16))
39
 
40
  # Display results
41
  st.subheader("Analysis Result")
42
  st.write(f"### **Emotion:** {emotion_label} (Confidence: {emotion_score})")
43
  st.write(f"### **Generated Feedback:** {feedback}")
44
 
45
- # Audio output
46
- st.audio(audio_path, format="audio/wav")
 
 
 
 
 
 
1
  import streamlit as st
2
  from transformers import pipeline
 
 
3
  import numpy as np
4
 
5
  # Load Hugging Face pipelines
 
30
  feedback = feedback_generator(f"emotion: {emotion_label} text: {comment_input}", max_length=50)[0]["generated_text"]
31
 
32
  # Convert feedback text to speech
33
+ st.text('Generating audio data...')
34
+ audio_data = text_to_audio(feedback)
35
+
 
36
 
37
  # Display results
38
  st.subheader("Analysis Result")
39
  st.write(f"### **Emotion:** {emotion_label} (Confidence: {emotion_score})")
40
  st.write(f"### **Generated Feedback:** {feedback}")
41
 
42
+
43
+ # Play button
44
+ if st.button("Play Audio"):
45
+ st.audio(audio_data['audio'],
46
+ format="audio/wav",
47
+ start_time=0,
48
+ sample_rate = audio_data['sampling_rate'])