Update app.py
Browse files
app.py
CHANGED
|
@@ -77,17 +77,23 @@ def generate_image(api_key, text):
|
|
| 77 |
else:
|
| 78 |
return None
|
| 79 |
|
|
|
|
| 80 |
# Function to get predictions
|
| 81 |
def get_predictions(audio_input):
|
| 82 |
emotion_prediction = predict_emotion_from_audio(audio_input)
|
|
|
|
| 83 |
image = generate_image(api_key, emotion_prediction)
|
| 84 |
-
return emotion_prediction, image
|
| 85 |
|
| 86 |
# Create the Gradio interface
|
| 87 |
interface = gr.Interface(
|
| 88 |
fn=get_predictions,
|
| 89 |
-
inputs=gr.Audio(label="Input Audio", type="
|
| 90 |
-
outputs=[
|
|
|
|
|
|
|
|
|
|
|
|
|
| 91 |
)
|
| 92 |
|
| 93 |
interface.launch()
|
|
|
|
| 77 |
else:
|
| 78 |
return None
|
| 79 |
|
| 80 |
+
|
| 81 |
# Function to get predictions
def get_predictions(audio_input):
    """Run the full audio pipeline on a single input clip.

    Predicts the emotion in *audio_input*, transcribes the speech, and
    generates an image from the predicted emotion.

    Returns:
        tuple: (emotion prediction, transcribed text, generated image).
    """
    predicted_emotion = predict_emotion_from_audio(audio_input)
    transcript = transcribe(audio_input)
    # api_key is a module-level credential defined earlier in the file —
    # generate_image uses it to call the image-generation service.
    generated_image = generate_image(api_key, predicted_emotion)
    return predicted_emotion, transcript, generated_image
|
| 88 |
# Create the Gradio interface: one audio clip in -> emotion label,
# transcription, and a generated image out.
interface = gr.Interface(
    fn=get_predictions,
    # NOTE(review): the original used type="file", which is deprecated/removed
    # in current Gradio releases; "filepath" passes the recorded/uploaded clip
    # to get_predictions as a path string — confirm the predict/transcribe
    # helpers expect a path.
    inputs=gr.Audio(label="Input Audio", type="filepath"),
    outputs=[
        # label= must be passed by keyword: the first positional parameter of
        # gr.Label is `value`, so gr.Label("Emotion Prediction") would set the
        # displayed value instead of the component's caption.
        gr.Label(label="Emotion Prediction"),
        gr.Label(label="Transcribed Text"),
        gr.Image(type='pil', label="Generated Image"),
    ],
)

interface.launch()