"""Gradio tool for annotating the predominant emotion of audio samples.

Reads the sample list from ``combined_annotations.xlsx``, presents each
audio clip with its transcript, and records the annotator's emotion label
and free-form comments, persisting the table to ``annotations.csv`` after
every save.
"""

import os

import gradio as gr
import pandas as pd

# Spreadsheet listing every audio sample to annotate.
# Columns used below: "SAMPLE ID" and "SENTENCE".
file_list = pd.read_excel(os.path.join('combined_annotations.xlsx'))

# Accumulates one row per annotated sample; mirrored to annotations.csv.
annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'comments'])

# Mutable holder so the callbacks below can update the position in place.
current_index = {"index": 0}


def load_example(index):
    """Load the example (audio + text) at *index*.

    Returns a 4-tuple ``(sentence, audio_path, emotion, comments)``
    matching the Gradio outputs.  Emotion/comments are restored from a
    previously saved annotation of the same sample, else empty strings.
    """
    row = file_list.iloc[index]
    sample_id = row["SAMPLE ID"]
    # Audio files live in sub-folders named after the first
    # dash-separated token of the sample id.
    audio_path = os.path.join(
        'files_to_annotate_padded_smaller_emotion_set',
        sample_id.split('-')[0],
        sample_id + '.wav',
    )
    print(f"Audio path: {audio_path}, Exists: {os.path.exists(audio_path)}")
    sentence = row["SENTENCE"]

    # Look the previous annotation up by sample id.  (A positional lookup
    # such as ``annotations.iloc[index]`` pairs the wrong rows once the
    # user navigates backwards, because rows are appended in visit order.)
    match = annotations[annotations["sample_id"] == sample_id]
    if len(match):
        previous = match.iloc[0]
        return sentence, audio_path, previous["emotion"], previous["comments"]
    return sentence, audio_path, '', ''


def save_annotation(emotions, comments):
    """Save (or overwrite) the annotation for the current example.

    Persists the whole table to ``annotations.csv`` after every change so
    no work is lost if the session dies.
    """
    idx = current_index["index"]
    row = file_list.iloc[idx]
    sample_id = row["SAMPLE ID"]

    if sample_id in annotations["sample_id"].values:
        # Revisited sample: update the existing row in place.
        annotations.loc[annotations["sample_id"] == sample_id,
                        ["emotion", "comments"]] = [emotions, comments]
    else:
        annotations.loc[len(annotations)] = [
            sample_id, row["SENTENCE"], emotions, comments,
        ]

    annotations.to_csv("annotations.csv", index=False)


def next_example(emotions, comments):
    """Validate, save, and advance to the next example.

    Stays on (and re-returns) the last example when already at the end,
    so Gradio always receives a full output tuple.
    """
    # An untouched gr.Radio yields None, not '', so test for any falsy
    # value — otherwise the validation never fires.
    if not emotions:
        raise gr.Error("Please fill out the emotion section")
    save_annotation(emotions, comments)
    if current_index["index"] < len(file_list) - 1:
        current_index["index"] += 1
    return load_example(current_index["index"])


def previous_example(emotion, comments):
    """Save the current annotation and move back one example (clamped at
    the first)."""
    save_annotation(emotion, comments)
    if current_index["index"] > 0:
        current_index["index"] -= 1
    return load_example(current_index["index"])


# ---------------------------------------------------------------------------
# Gradio interface
# ---------------------------------------------------------------------------
with gr.Blocks() as demo:
    # Row with the audio player.
    with gr.Row():
        audio_player = gr.Audio(label="Audio", type="filepath", interactive=False)

    # Hidden row with the corresponding sentence.
    with gr.Row():
        with gr.Accordion(label="Click to see the sentence", open=False):
            sentence_text = gr.Textbox(label="Sentence", interactive=False)

    # Row for emotion annotation and confidence.
    with gr.Row():
        emotions = gr.Radio(["Joy", "Sad", "Angry", "Neutral"],
                            label="Predominant Emotion")
        # NOTE(review): this slider is displayed but its value is never
        # saved — wire it into the callbacks if confidence should be
        # recorded.
        confidence = gr.Slider(label="Confidence (%)", minimum=0, maximum=100,
                               step=10)

    # Sidebar placeholder for annotation instructions.
    with gr.Sidebar():
        gr.Textbox()

    # Next and Previous buttons.
    with gr.Row():
        next_button = gr.Button("Next Example")
        previous_button = gr.Button("Previous Example")

    comments = gr.Textbox(label="Comments", interactive=True)

    next_button.click(
        next_example,
        inputs=[emotions, comments],
        outputs=[sentence_text, audio_player, emotions, comments],
    )
    previous_button.click(
        previous_example,
        inputs=[emotions, comments],
        outputs=[sentence_text, audio_player, emotions, comments],
    )

    # Populate the widgets with the first example when the page loads.
    # (Assigning ``component.value`` after construction does not refresh
    # the rendered UI.)
    demo.load(
        lambda: load_example(current_index["index"]),
        inputs=None,
        outputs=[sentence_text, audio_player, emotions, comments],
    )

demo.launch()