import os

import gradio as gr
import pandas as pd

# List of all audio files to annotate.
file_list = pd.read_excel(os.path.join('combined_annotations.xlsx'))
annotations_completed = 0
total_annotations = len(file_list)

# Accumulates one row per annotated sample: sample_id, sentence, emotion, comments.
annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'comments'])

# Mutable holder so the callbacks can advance the position without `global`.
current_index = {"index": 0}


def load_example(index):
    """Load the example at row *index* of ``file_list``.

    Returns ``(sentence, audio_path, emotion, comments)``, where emotion and
    comments come from a previously saved annotation for this sample, or are
    empty strings if the sample has not been annotated yet.
    """
    row = file_list.iloc[index]
    sample_id = row["SAMPLE ID"]
    audio_path = os.path.join(
        'files_to_annotate_padded_smaller_emotion_set',
        sample_id.split('-')[0],
        sample_id + '.wav',
    )
    print(f"Audio path: {audio_path}, Exists: {os.path.exists(audio_path)}")
    sentence = row["SENTENCE"]

    # Look the previous annotation up by sample id, not by position:
    # save_annotation appends rows in the order the user saves them, which
    # need not match the order of file_list, so the original positional
    # `annotations.iloc[index]` could return the wrong sample's annotation
    # after back-and-forth navigation.
    match = annotations[annotations["sample_id"] == sample_id]
    if len(match):
        previous = match.iloc[0]
        emotion, comments = previous["emotion"], previous["comments"]
    else:
        emotion, comments = '', ''
    return sentence, audio_path, emotion, comments


def save_annotation(emotions, comments):
    """Save the annotation for the current example and persist all to CSV."""
    idx = current_index["index"]
    row = file_list.iloc[idx]
    sample_id = row["SAMPLE ID"]
    sentence = row["SENTENCE"]

    # Update the existing row if this sample was annotated before,
    # otherwise append a new one.
    if sample_id in annotations["sample_id"].values:
        annotations.loc[annotations["sample_id"] == sample_id,
                        ["emotion", "comments"]] = [emotions, comments]
    else:
        annotations.loc[len(annotations)] = [sample_id, sentence, emotions, comments]

    annotations.to_csv("annotations.csv", index=False)  # Save to a CSV file


def next_example(emotions, comments):
    """Save the current annotation and move to the next example."""
    # gr.Radio yields None until a choice is made, so test for falsy rather
    # than comparing against '' (the original check never caught None).
    if not emotions:
        raise gr.Error("Please fill out the emotion section")
    save_annotation(emotions, comments)
    if current_index["index"] < len(file_list) - 1:
        current_index["index"] += 1
    # Always return a full set of outputs: the original fell through and
    # returned None on the last example, breaking the Gradio output binding.
    return load_example(current_index["index"])


def previous_example(emotion, comments):
    """Save the current annotation and move to the previous example."""
    save_annotation(emotion, comments)
    if current_index["index"] > 0:
        current_index["index"] -= 1
    return load_example(current_index["index"])


def update_progress():
    """Advance the progress counter and return the progress label text."""
    global annotations_completed
    if annotations_completed < total_annotations:
        annotations_completed += 1
    # Fixed NameError: the original referenced the undefined name
    # `annotations_tasks` here instead of `total_annotations`.
    return f"Progress: {annotations_completed}/{total_annotations}"


# ---------------------------------------------------------------------------
# Gradio interface
# ---------------------------------------------------------------------------
audio_path = 'test.mp3'

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Task Progress Tracker")
    progress_text = gr.Textbox(value="Progress: 0/10", interactive=False)

    # Row with audio player.
    with gr.Row():
        audio_player = gr.Audio(value=audio_path, label="Audio",
                                type="filepath", interactive=False)

    # Hidden row with the corresponding sentence.
    with gr.Row():
        with gr.Accordion(label="Click to see the sentence", open=False):
            sentence_text = gr.Textbox(label="Sentence", interactive=False)

    # Row for emotion annotation and confidence.
    with gr.Row():
        emotions = gr.Radio(["Joy", "Sad", "Angry", "Neutral"],
                            label="Predominant Emotion")
        confidence = gr.Slider(label="Confidence (%)", minimum=0,
                               maximum=100, step=10)

    # Instructions for emotion annotation.
    with gr.Sidebar():
        happy_words = gr.Textbox(label="Happy")

    # Next and previous buttons.
    with gr.Row():
        previous_button = gr.Button("Previous Example")
        next_button = gr.Button("Next Example")

    comments = gr.Textbox(label="Comments", interactive=True)

    # Pre-populate the widgets with the first example.
    sentence_text.value, audio_player.value, emotions.value, comments.value = \
        load_example(current_index["index"])

    previous_button.click(
        previous_example,
        inputs=[emotions, comments],
        outputs=[sentence_text, audio_player, emotions, comments],
    )
    next_button.click(
        next_example,
        inputs=[emotions, comments],
        outputs=[sentence_text, audio_player, emotions, comments],
    )
    next_button.click(update_progress, outputs=[progress_text])

demo.launch()