import gradio as gr
import pandas as pd
import os
import gradio as gr
# CSS for the custom HTML progress bar rendered at the top of the page.
# Fix: the #progressText rule was missing its closing brace, which made
# browsers drop the rule and left the label unstyled.
css = """#myProgress {
  width: 100%;
  background-color: gray;
  border-radius: 2px;
}
#myBar {
  width: 0%;
  height: 30px;
  background-color: blue;
  border-radius: 2px;
}
#progressText {
  position: absolute;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%);
  color: white;
  font-weight: bold;
  font-size: 14px;
}
"""
# Client-side JS run on "Next": widens #myBar and rewrites the counter label.
# The two arguments are wired from the hidden ann_completed / total Numbers.
js_progress_bar = """
function move(n_ann, total_ann) {
  var elem = document.getElementById("myBar");
  elem.style.width = n_ann / total_ann * 100 + "%";
  // Look the label up explicitly instead of relying on the implicit
  // window.<id> global that browsers create for elements with an id.
  var progressText = document.getElementById("progressText");
  progressText.innerText = 'Completed: ' + n_ann + '/' + total_ann;
}
"""
# -------------------------------------------------------------------
# Module-level state shared by the callbacks below.
# -------------------------------------------------------------------
# List of all audio files to annotate (one row per sample).
# Note: os.path.join() with a single argument was a no-op and was removed.
file_list = pd.read_excel('combined_annotations.xlsx')
total_annotations = len(file_list)
# Accumulates one row per annotated sample; persisted to annotations.csv
# by save_annotation() after every save.
annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'confidence', 'comments'])
current_index = {"index": 0}  # dict so the callbacks can mutate the cursor in place
def load_example(index):
    """Load row #index of ``file_list`` for display.

    Returns a tuple matching the Gradio output components:
    (sentence, audio_path, emotion, confidence, display_index, comments).
    If the sample was annotated before, the saved values are restored.
    """
    row = file_list.iloc[index]
    sample_id = row["SAMPLE ID"]
    # Audio lives under <root>/<prefix-before-dash>/<sample_id>.wav
    audio_path = os.path.join(
        'files_to_annotate_padded_smaller_emotion_set',
        sample_id.split('-')[0],
        sample_id + '.wav',
    )
    print(f"Audio path: {audio_path}, Exists: {os.path.exists(audio_path)}")
    sentence = row["SENTENCE"]
    # Look the previous annotation up by sample id — the same key
    # save_annotation() uses — instead of assuming annotations rows sit at
    # the same positional index as file_list rows (they don't once the
    # user navigates backwards).
    match = annotations[annotations["sample_id"] == sample_id]
    if len(match):
        previous_annotation = match.iloc[0].to_dict()
    else:
        previous_annotation = {"sample_id": sample_id, "emotion": '',
                               "confidence": 0, "comments": ''}
    # Display index derives from the requested row, not the global cursor
    # (the original showed current_index even when called with another index).
    return (sentence, audio_path, previous_annotation['emotion'],
            previous_annotation['confidence'], index + 1,
            previous_annotation["comments"])
def save_annotation(emotions, confidence, comments):
    """Save the annotation for the example at the current cursor position.

    Overwrites the existing row when the sample was annotated before,
    otherwise appends a new row. The frame is persisted to
    ``annotations.csv`` after every call.
    """
    idx = current_index["index"]
    row = file_list.iloc[idx]
    sample_id = row["SAMPLE ID"]
    sentence = row["SENTENCE"]
    if sample_id in annotations["sample_id"].values:
        # Re-annotation of an already-saved sample: overwrite, don't re-count.
        annotations.loc[annotations["sample_id"] == sample_id,
                        ["emotion", "confidence", "comments"]] = \
            [emotions, confidence, comments]
    else:
        annotations.loc[len(annotations)] = [sample_id, sentence, emotions,
                                             confidence, comments]
        # Count only genuinely new annotations (the original incremented on
        # every save, inflating the count when a sample was re-annotated).
        # NOTE(review): mutating a component's .value after build does not
        # refresh the UI by itself; the visible bar is driven by the JS
        # handler wired to next_button — confirm this counter is still needed.
        ann_completed.value += 1
    annotations.to_csv("annotations.csv", index=False)  # persist after every save
def next_example(emotions, confidence, comments):
    """Save the current annotation and advance to the next example.

    Refuses to advance while the emotion is still "Blank". At the last
    example the annotation is still saved and the same example is
    reloaded — the original fell through and implicitly returned None,
    which blanked every output component.
    """
    if emotions == "Blank":
        raise gr.Warning("Please fill out the emotion section")
    save_annotation(emotions, confidence, comments)
    if current_index["index"] < len(file_list) - 1:
        current_index["index"] += 1
    # Always return a full output tuple so Gradio components stay populated.
    return load_example(current_index["index"])
def previous_example(emotion, confidence, comments):
    """Save the current annotation (if filled in) and step back one example."""
    # `emotion` is the Radio's string value, not the component object, so it
    # has no .value attribute — the original `emotion.value` raised
    # AttributeError on every click.
    if emotion != "Blank":
        save_annotation(emotion, confidence, comments)
    if current_index["index"] > 0:
        current_index["index"] -= 1
    return load_example(current_index["index"])
# ===================
# Gradio Interface
# ===================
audio_path = 'test.mp3'  # placeholder until "Let's go!" loads the first sample
with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
    participant_id = gr.Textbox(label='What is your participant ID?', interactive=True)
    lets_go = gr.Button("Let's go!")
    # Hidden counters feeding the client-side progress bar.
    ann_completed = gr.Number(1, visible=False)
    total = gr.Number(total_annotations, visible=False)
    # Row with progress bar
    gr.HTML("""
    <div id="myProgress">
        <div id="myBar">
            <span id="progressText">Completed: /</span>
        </div>
    </div>""")
    # Row with audio player (hidden until the first example is loaded)
    with gr.Row():
        audio_player = gr.Audio(value=audio_path, label="Audio", type="filepath",
                                interactive=False, visible=False)
    # Hidden row with corresponding sentence
    with gr.Row():
        accordion = gr.Accordion(label="Click to see the sentence", open=False)
        with accordion:
            sentence_text = gr.Textbox(label="Sentence", interactive=False)
    # Row for emotion annotation and confidence
    with gr.Row():
        emotions = gr.Radio(["Blank", "Joy", "Sad", "Angry", "Neutral"],
                            label="Predominant Emotion", value="Blank")
    with gr.Row():
        confidence = gr.Slider(label="Confidence (%)", minimum=0, maximum=100, step=10)
    # Instructions for emotion annotation
    with gr.Sidebar():
        happy_words = gr.Textbox(label="Happy")
    # Next and Previous Buttons
    with gr.Row():
        previous_button = gr.Button("Previous Example")
        next_button = gr.Button("Next Example")
    # Comment section
    comments = gr.Textbox(label="Comments", interactive=True)
    # Go back
    previous_button.click(
        previous_example,
        inputs=[emotions, confidence, comments],
        outputs=[sentence_text, audio_player, emotions, confidence, ann_completed, comments],
    )
    # Go to the next example
    next_button.click(
        next_example,
        inputs=[emotions, confidence, comments],
        outputs=[sentence_text, audio_player, emotions, confidence, ann_completed, comments],
    )
    # Update progress bar: fn=None runs the JS client-side. The counters must
    # be *inputs* so move(n_ann, total_ann) receives their values — the
    # original passed them as outputs and the JS got no arguments.
    next_button.click(None, inputs=[ann_completed, total], outputs=[],
                      js=js_progress_bar)
    # Load the first example (typo fix: `audio_playe` -> `audio_player`,
    # which raised NameError when the button was clicked).
    lets_go.click(
        load_example,
        inputs=[gr.Number(current_index["index"], visible=False)],
        outputs=[sentence_text, audio_player, emotions, confidence,
                 ann_completed, comments],
    )
demo.launch()