import os

import gradio as gr
import pandas as pd
css = """#myProgress {
width: 100%;
background-color: var(--block-border-color);
border-radius: 2px;
}
#myBar {
width: 0%;
height: 30px;
background-color: var(--block-title-background-fill);
border-radius: 2px;
}
#progressText {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
color: var(--block-title-text-color);
font-weight: regular;
font-size: 14px;
}
h1, h2, h3, h4 {
padding: var(--block-title-padding);
color: var(--block-title-text-color);
border: solid var(--block-title-border-width) var(--block-title-border-color);
border-radius: var(--block-title-radius);
background: var(--block-title-background-fill);
width: fit-content;
display: inline-block;
}
h4 {
margin: 0px;
color: var(--primary-800);
}
#instructions {
max-width: 980px;
align-self: center;
}
.content-box {
border-color: var(--block-border-color);
border-radius: var(--block-radius);
background: var(--block-background-fill);
padding: var(--block-label-padding);
}
"""
js_progress_bar = """
function move(n_ann, total_ann) {
var elem = document.getElementById("myBar");
elem.style.width = n_ann/total_ann * 100 + "%";
progressText.innerText = 'Completed: ' + n_ann + ' / ' + total_ann;
const waveform = document.querySelector('#waveform div');
const shadowRoot = waveform.shadowRoot;
const canvases = shadowRoot.querySelector('.wrapper');
console.log(canvases.offsetWidth)
const leftOffsetPct = 0.3;
const widthPct = 0.3;
// Create a style element for the shadow DOM
const style = document.createElement('style');
style.textContent = `
.wrapper::after {
content: '';
position: absolute;
top: 0;
left: ${canvases.offsetWidth * leftOffsetPct}px;
width: ${canvases.offsetWidth * widthPct}px;
height: 100%;
background-color: blue;
z-index: 999;
opacity: 0.5;
}
/* Ensure parent has positioning context */
.wrapper {
position: relative;
}
`;
// Append the style to the shadow root
shadowRoot.appendChild(style);
console.log('Added pseudo-element to canvases');
}
"""
intro_html = """
<h1>Emotionality in Speech</h1>
<div class="content-box">
<p>Spoken language communicates more than just words. Speakers use tone, pitch, and other nonverbal cues to express emotions. In emotional speech, these cues can strengthen or even contradict the meaning of the words—for example, irony can make a positive phrase sound sarcastic. For this research, we will focus on three basic emotions plus neutral:</p>
<ul>
<li><h4>Anger</h4></li>
<li><h4>Happiness</h4></li>
<li><h4>Sadness</h4></li>
<li><h4>Neutral</h4></li>
</ul>
<p>This may seem like a small set, but it's a great starting point for analyzing emotions in such a large collection—303 hours of interviews! (That’s 13 days of nonstop listening!)</p>
</div>
<h2>What is the archive you will be annotating?</h2>
<div class="content-box">
<p>You will be annotating short audio clips extracted from the ACT UP (AIDS Coalition to Unleash Power) Oral History Project. This archive features interviews with individuals who were part of ACT UP during the late 1980s and early 1990s, amidst the AIDS epidemic.</p>
<p>In each video, the subjects talk about their life before the epidemic, how they were affected by AIDS and their work in ACT UP.</p>
</div>
<h2>What will you be annotating?</h2>
<div class="content-box">
<p>You will annotate one emotion per short audio clip, based on the following criteria:</p>
<ul>
<li>
<strong>Predominant Emotion:</strong> The emotion expressed with the highest intensity. Emotions can be complex, and multiple emotions may occur at the same time.
</li>
<li>
<strong>Perceived Emotion at the Time of Recording:</strong> In Oral History Archives, interviewees discuss their past. However, you should annotate the emotion they appear to feel at the time of recording, NOT what they felt during the event they describe.
</li>
<li>
<strong>Speech Emotionality:</strong> Focus on how something is said rather than what is said. For example, if a friend recounts an awful day with humor, the content may be sad, but the delivery is joyful. In this case, linguistic emotionality (content) would be classified as sad, while paralinguistic emotionality (tone and delivery) would be classified as joyful.
</li>
</ul>
<p>Further, you will be asked to fill "How confident you are that the annotated emotion is present in the recording?" from a scale of 0 to 10, with 0 being "not at all confident" and 1 being "certain, completely confident".</p>
<p>There will be a "Comment/Feedback" section where you can makes notes.<br>Below the audio, there will be an option to view the transcribed sentence. Please use this only if you are struggling to understand the audio.</p>
<p>Let’s explore the four possible categories and listen to some examples!</p>
</div>
"""
# List of all audio files to annotate
file_list = pd.read_excel('combined_annotations.xlsx')
total_annotations = len(file_list)

# Initialize an empty DataFrame to store annotations
annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'confidence', 'comments'])

current_index = {"index": 0}  # Dict, so the event handlers below can mutate the index in place
def load_example(index):
    """Load the example in row #index of `file_list`.

    If this row was already annotated, the stored values are returned so the
    UI widgets are pre-filled with them."""
    row = file_list.iloc[index]
    audio_path = os.path.join('files_to_annotate_padded_smaller_emotion_set',
                              row["SAMPLE ID"].split('-')[0],
                              row["SAMPLE ID"] + '.wav')
    sentence = row["SENTENCE"]
    # If the user already made an annotation for this example, return it
    previous_annotation = (
        annotations.iloc[index].to_dict()
        if index < len(annotations)
        else {"sample_id": row["SAMPLE ID"], "emotion": 'Blank', "confidence": 0, "comments": ''}
    )
    return (sentence, audio_path, previous_annotation['emotion'],
            previous_annotation['confidence'], current_index['index'] + 1,
            previous_annotation["comments"])
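
# Expected on-disk layout (hypothetical sample id "smith-0001"):
#   files_to_annotate_padded_smaller_emotion_set/smith/smith-0001.wav
# i.e. each clip sits in a folder named after the part of its SAMPLE ID before
# the first '-'.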
def save_annotation(emotions, confidence, comments, participant_id):
    """Save the annotation for the current example."""
    idx = current_index["index"]
    row = file_list.iloc[idx]
    sample_id = row["SAMPLE ID"]
    sentence = row["SENTENCE"]
    # Update or append annotation
    if sample_id in annotations["sample_id"].values:
        annotations.loc[annotations["sample_id"] == sample_id,
                        ["emotion", "confidence", "comments"]] = [emotions, confidence, comments]
    else:
        annotations.loc[len(annotations)] = [sample_id, sentence, emotions, confidence, comments]
        ann_completed.value += 1
    annotations.to_csv(f"{participant_id}_annotations.csv", index=False)  # Save to a CSV file
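
# Example of the resulting CSV (hypothetical participant "P01" and sample id
# "smith-0001"):
#   P01_annotations.csv:
#     sample_id,sentence,emotion,confidence,comments
#     smith-0001,This is a sentence.,Joy,80,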
def next_example(emotions, confidence, comments, participant_id):
    """Move to the next example."""
    if emotions == "Blank":
        gr.Warning("Please fill out the emotion section")
    else:
        save_annotation(emotions, confidence, comments, participant_id)
        if current_index["index"] < len(file_list) - 1:
            current_index["index"] += 1
    return load_example(current_index["index"])

def previous_example(emotion, confidence, comments, participant_id):
    """Move to the previous example."""
    if emotion != "Blank":
        save_annotation(emotion, confidence, comments, participant_id)
    if current_index["index"] > 0:
        current_index["index"] -= 1
    return load_example(current_index["index"])

def deactivate_participant_id(participant_id, lets_go):
    """Lock the participant ID textbox and its button once annotation starts."""
    participant_id = gr.Textbox(label='What is your participant ID?', value=participant_id, interactive=False)
    lets_go = gr.Button("Participant selected!", interactive=False)
    return participant_id, lets_go

def activate_elements(emotions, confidence, comments, next_button, previous_button):
    """Reveal the annotation widgets once the participant has pressed "Let's go!"."""
    emotions = gr.Radio(["Blank", "Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion", value="Blank", visible=True)
    confidence = gr.Slider(label="Confidence (%)", minimum=0, maximum=100, step=10, visible=True)
    comments = gr.Textbox(label="Comments", visible=True)
    previous_button = gr.Button("Previous Example", visible=True)
    next_button = gr.Button("Next Example", visible=True)
    return emotions, confidence, comments, next_button, previous_button
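
# An alternative (assuming a reasonably recent Gradio version) is to return
# gr.update(visible=True) for each component instead of constructing new ones:
#     return tuple(gr.update(visible=True) for _ in range(5))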
# ===================
# Gradio Interface
# ===================
with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
    # Sidebar: participant ID entry
    with gr.Sidebar():
        participant_id = gr.Textbox(label='What is your participant ID?', interactive=True)
        lets_go = gr.Button("Let's go!")
        # happy_words = gr.Textbox(label="Happy")
    # Instructions for emotion annotation
    with gr.Tab("Instructions", elem_id='instructions'):
        instructions = gr.HTML(intro_html, padding=False)
        agreement = gr.Checkbox(value=False, label="I agree",
                                info="I agree to have my annotations, comments, and questionnaire answers used for research purposes. I understand that any personal information will be anonymized.",
                                interactive=True)
with gr.Tab("Annotation Interface"):
ann_completed = gr.Number(0, visible=False)
total = gr.Number(total_annotations, visible=False)
# Row with progress bar
gr.HTML("""
<div id="myProgress">
<div id="myBar">
<span id="progressText">Press "Let's go!" to start</span>
</div>
</div>
""", padding = False)
# Row with audio player
with gr.Row():
audio_player = gr.Audio(value= 'test.mp3', label="Audio", type="filepath", interactive=False, show_download_button = False, show_share_button = False)
# Hidden row with corresponding sentence
with gr.Row():
accordion = gr.Accordion(label="Click to see the sentence", open=False)
with accordion:
sentence_text = gr.Textbox(label="Transcription", interactive=False, value = 'This is a sentence.')
# Row for emotion annotation and confidence
with gr.Row():
emotions = gr.Radio(["Blank", "Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion", value = "Blank", visible = False)
with gr.Row():
confidence = gr.Slider(label="Confidence (%)", minimum=0, maximum=100, step=10, visible = False)
with gr.Row():
# Comment section
comments = gr.Textbox(label="Comments", visible =False)
# Next and Previous Buttons
with gr.Row():
previous_button = gr.Button("Previous Example", visible = False)
next_button = gr.Button("Next Example", visible = False)
    # Go back
    previous_button.click(
        previous_example,
        inputs=[emotions, confidence, comments, participant_id],
        outputs=[sentence_text, audio_player, emotions, confidence, ann_completed, comments],
    )
    # Go to the next example
    next_button.click(
        next_example,
        inputs=[emotions, confidence, comments, participant_id],
        outputs=[sentence_text, audio_player, emotions, confidence, ann_completed, comments],
    )
    # Update the progress bar
    next_button.click(None, [], [ann_completed, total], js=js_progress_bar)
    lets_go.click(None, [], [ann_completed, total], js=js_progress_bar)
    # lets_go.click(deactivate_participant_id, [participant_id, lets_go], [participant_id, lets_go])
    # lets_go.click(activate_elements, [emotions, confidence, comments, next_button, previous_button], [emotions, confidence, comments, next_button, previous_button])
    # lets_go.click(load_example, inputs=[gr.Number(current_index["index"], visible=False)], outputs=[sentence_text, audio_player, emotions, confidence, ann_completed, comments])
demo.launch()