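"""Gradio app for annotating the emotion of short audio clips.

Annotators listen to each clip, optionally reveal its transcript, pick a
predominant emotion, and leave comments; results are written incrementally
to annotations.csv.
"""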
import os

import gradio as gr
import pandas as pd
# List of all audio files to annotate
file_list = pd.read_excel('combined_annotations.xlsx')
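# The spreadsheet is expected to provide at least 'SAMPLE ID' and 'SENTENCE'
# columns; load_example and save_annotation below rely on both.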
# Initialize an empty DataFrame to store annotations
annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'comments'])
current_index = {"index": 0} # Dictionary to allow modifying inside functions
def load_example(index):
    """Load the example (audio + text) at `index`, plus any saved annotation."""
    row = file_list.iloc[index]
    audio_path = os.path.join(
        'files_to_annotate_padded_smaller_emotion_set',
        row["SAMPLE ID"].split('-')[0],
        row["SAMPLE ID"] + '.wav',
    )
    print(f"Audio path: {audio_path}, Exists: {os.path.exists(audio_path)}")
    sentence = row["SENTENCE"]
    # If the user already annotated this example, pre-fill the widgets with it.
    # Look the row up by sample_id rather than by position, since annotation
    # rows are appended in visit order and need not align with file_list.
    match = annotations[annotations["sample_id"] == row["SAMPLE ID"]]
    previous_annotation = (
        match.iloc[0].to_dict()
        if not match.empty
        else {"sample_id": row["SAMPLE ID"], "emotion": '', "comments": ''}
    )
    return sentence, audio_path, previous_annotation["emotion"], previous_annotation["comments"]
def save_annotation(emotions, comments):
"""Save the annotation for the current example."""
idx = current_index["index"]
row = file_list.iloc[idx]
sample_id = row["SAMPLE ID"]
sentence = row["SENTENCE"]
    # Update the existing row if this sample was annotated before; otherwise append
    if sample_id in annotations["sample_id"].values:
        annotations.loc[
            annotations["sample_id"] == sample_id, ["emotion", "comments"]
        ] = [emotions, comments]
else:
annotations.loc[len(annotations)] = [sample_id, sentence, emotions, comments]
    annotations.to_csv("annotations.csv", index=False)  # Persist after every save
def next_example(emotions, comments):
    """Save the current annotation and move to the next example."""
    save_annotation(emotions, comments)
    if current_index["index"] < len(file_list) - 1:
        current_index["index"] += 1
        return load_example(current_index["index"])
    # Past the last example: return one value per output component (4, not 7)
    return "End of examples", None, '', ''
def previous_example(emotion, comments):
    """Save the current annotation and move to the previous example."""
    save_annotation(emotion, comments)
    if current_index["index"] > 0:
        current_index["index"] -= 1
    return load_example(current_index["index"])
# Gradio Interface
audio_path = 'test.mp4'  # Placeholder; overwritten by the initial load below
with gr.Blocks() as demo:
with gr.Row():
audio_player = gr.Audio(value=audio_path, label="Audio", type="filepath", interactive=False)
with gr.Row():
with gr.Accordion(label="Click to see the sentence", open=False):
sentence_text = gr.Textbox(label="Sentence", interactive=False)
with gr.Row():
slider = gr.Slider(
minimum=-100,
maximum=100,
step=1,
label="Sentiment Slider",
info="Slide to the left for negative sentiment, to the right for positive sentiment",
show_label=True,
elem_classes=["sentiment-slider"]
)
emotions = gr.Radio(["Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion")
confidence = gr.Slider(label="Confidence (%)", minimum=0, maximum=100, step=10)
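        # Note: the sentiment and confidence sliders are displayed but not yet
        # passed to save_annotation, so their values are not persisted.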
    # Instructions for emotion annotation (placeholder sidebar; labels are
    # illustrative and the widgets are not wired to any handler yet)
    with gr.Sidebar():
        gr.Textbox(label="Instructions", interactive=False)
        gr.Button("Help")
with gr.Row():
save_button = gr.Button("Save Annotation")
next_button = gr.Button("Next Example")
previous_button = gr.Button("Previous Example")
comments = gr.Textbox(label="Comments", interactive=True)
    # Initial load: use demo.load so the widgets are populated with the first
    # example when the app starts
    demo.load(
        lambda: load_example(current_index["index"]),
        outputs=[sentence_text, audio_player, emotions, comments],
    )
save_button.click(
save_annotation,
inputs=[emotions, comments]
)
next_button.click(
next_example,
inputs=[emotions, comments],
outputs=[sentence_text, audio_player, emotions, comments],
)
previous_button.click(
previous_example,
inputs=[emotions, comments],
outputs=[sentence_text, audio_player, emotions, comments],
)
demo.launch()