# Audio emotion annotation tool (Gradio app, hosted as a Hugging Face Space).
import os

import gradio as gr
import pandas as pd

# List of all audio files to annotate (one row per sample).
file_list = pd.read_excel(os.path.join('combined_annotations.xlsx'))

# Progress counters for the UI progress text.
annotations_completed = 0
total_annotations = len(file_list)

# In-memory store of the user's annotations; persisted to CSV on every save.
annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'comments'])

# Mutable holder so callbacks can rebind the current row index in place.
current_index = {"index": 0}
def load_example(index):
    """Load row *index* of ``file_list`` for display in the UI.

    Returns a tuple ``(sentence, audio_path, emotion, comments)``; the
    emotion and comments are restored from any previously saved annotation
    at the same position, otherwise empty strings.
    """
    record = file_list.iloc[index]
    sample_id = record["SAMPLE ID"]
    # Audio lives under <root>/<id-prefix>/<sample_id>.wav
    audio_path = os.path.join(
        'files_to_annotate_padded_smaller_emotion_set',
        sample_id.split('-')[0],
        sample_id + '.wav',
    )
    print(f"Audio path: {audio_path}, Exists: {os.path.exists(audio_path)}")
    sentence = record["SENTENCE"]
    # Restore an earlier annotation for this position, if one exists.
    if index < len(annotations):
        previous = annotations.iloc[index].to_dict()
    else:
        previous = {"sample_id": sample_id, "emotion": '', "comments": ''}
    return sentence, audio_path, previous['emotion'], previous["comments"]
def save_annotation(emotions, comments):
    """Record *emotions*/*comments* for the current example and persist to CSV."""
    idx = current_index["index"]
    current_row = file_list.iloc[idx]
    sample_id = current_row["SAMPLE ID"]
    sentence = current_row["SENTENCE"]
    # Overwrite an existing annotation for this sample, else append a new row.
    if sample_id in annotations["sample_id"].values:
        mask = annotations["sample_id"] == sample_id
        annotations.loc[mask, ["emotion", "comments"]] = [emotions, comments]
    else:
        annotations.loc[len(annotations)] = [sample_id, sentence, emotions, comments]
    # Persist after every change so work survives a restart.
    annotations.to_csv("annotations.csv", index=False)
def next_example(emotions, comments):
    """Save the current annotation and advance to the next example.

    Returns the ``(sentence, audio_path, emotion, comments)`` tuple for the
    (possibly unchanged) current example, matching ``previous_example``.

    Raises:
        gr.Error: if no emotion was selected, so an example cannot be
            skipped without being annotated.
    """
    if emotions == '':
        raise gr.Error("Please fill out the emotion section")
    save_annotation(emotions, comments)
    if current_index["index"] < len(file_list) - 1:
        current_index["index"] += 1
    # BUG FIX: previously, clicking "Next" on the last example fell through
    # with no return value, feeding None into all four Gradio outputs and
    # blanking the UI. Always return the current example instead.
    return load_example(current_index["index"])
def previous_example(emotion, comments):
    """Save the current annotation, then step back one example when possible."""
    save_annotation(emotion, comments)
    if current_index["index"] > 0:
        current_index["index"] -= 1
    # Reload whichever row is now current (unchanged when already at row 0).
    return load_example(current_index["index"])
def update_progress():
    """Bump the completed-annotation counter and return the progress text.

    The counter is capped at ``total_annotations`` so repeated clicks past
    the end do not overcount.
    """
    global annotations_completed
    if annotations_completed < total_annotations:
        annotations_completed += 1
    # BUG FIX: the f-string referenced the undefined name `annotations_tasks`,
    # raising NameError on every call; use `total_annotations`. The unused
    # `progress` ratio local was dropped.
    return f"Progress: {annotations_completed}/{total_annotations}"
# Gradio Interface
audio_path = 'test.mp3'
with (gr.Blocks(theme=gr.themes.Soft()) as demo):
gr.Markdown("# Task Progress Tracker")
progress_text = gr.Textbox(value="Progress: 0/10", interactive=False)
# Row with audio player
with gr.Row():
audio_player = gr.Audio(value=audio_path, label="Audio", type="filepath", interactive=False)
# Hidden row with corresponding sentence
with gr.Row():
with gr.Accordion(label="Click to see the sentence", open=False):
sentence_text = gr.Textbox(label="Sentence", interactive=False)
# Row for emotion annotation and confidence
with gr.Row():
emotions = gr.Radio(["Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion")
confidence = gr.Slider(label="Confidence (%)", minimum=0, maximum=100, step=10)
# Instructions for emotion annotation
with gr.Sidebar():
happy_words = gr.Textbox(label = "Happy")
# Next and Previous Buttons
with gr.Row():
previous_button = gr.Button("Previous Example")
next_button = gr.Button("Next Example")
comments = gr.Textbox(label="Comments", interactive=True)
sentence_text.value, audio_player.value, emotions.value, comments.value = load_example(
current_index["index"])
previous_button.click(
previous_example,
inputs=[emotions, comments],
outputs=[sentence_text, audio_player, emotions, comments],
)
next_button.click(
next_example,
inputs=[emotions, comments],
outputs=[sentence_text, audio_player, emotions, comments],
)
next_button.click(update_progress, outputs=[progress_text])
demo.launch()