import os
from pathlib import Path

import gradio as gr
import pandas as pd

css = """#myProgress {
  width: 100%;
  background-color: var(--block-border-color);
  border-radius: 2px;
}

  #myBar {
    width: 0%;
    height: 30px;
    background-color: var(--block-title-background-fill);
    border-radius: 2px;
  } 

  #progressText {
    position: absolute;
    top: 50%;
    left: 50%;
    transform: translate(-50%, -50%); 
    color: var(--block-title-text-color); 
    font-weight: normal;
    font-size: 14px;
  }

  h1, h2, h3, h4 {
    padding: var(--block-title-padding);
    color: var(--block-title-text-color);
    border: solid var(--block-title-border-width) var(--block-title-border-color);
    border-radius: var(--block-title-radius);
    background: var(--block-title-background-fill);
    width: fit-content;
    display: inline-block;
  }

  h4 {
    margin: 0px;
    color: var(--block-title-background-fill);
    background: var(--block-title-text-color);
  }

  #instructions {
    max-width: 980px;
    align-self: center;
  }

  .content-box {
    border-color: var(--block-border-color);
    border-radius: var(--block-radius);
    background: var(--block-background-fill);
    padding: var(--block-label-padding);
  }


"""



js_progress_bar = """
    function move(n_ann, total_ann) {

    // Update the width and label of the progress bar
    var elem = document.getElementById("myBar");
    elem.style.width = n_ann/total_ann * 100 + "%";
    document.getElementById("progressText").innerText = 'Completed: ' + n_ann + ' / ' + total_ann;

    // Reach into the waveform player's shadow DOM to overlay a highlight region
    const waveform = document.querySelector('#waveform div');
    if (!waveform || !waveform.shadowRoot) { return; }
    const shadowRoot = waveform.shadowRoot;
    const wrapper = shadowRoot.querySelector('.wrapper');
    if (!wrapper) { return; }

    // Hard-coded highlight region: starts at 30% of the waveform width, spans 30%
    const leftOffsetPct = 0.3;
    const widthPct = 0.3;

    // Reuse a single style element so repeated clicks don't pile up duplicates
    let style = shadowRoot.querySelector('#waveform-overlay-style');
    if (!style) {
        style = document.createElement('style');
        style.id = 'waveform-overlay-style';
        shadowRoot.appendChild(style);
    }
    style.textContent = `
    .wrapper::after {
        content: '';
        position: absolute;
        top: 0;
        left: ${wrapper.offsetWidth * leftOffsetPct}px;
        width: ${wrapper.offsetWidth * widthPct}px;
        height: 100%;
        background-color: blue;
        z-index: 999;
        opacity: 0.5;
    }

    /* Ensure the parent provides a positioning context */
    .wrapper {
        position: relative;
    }
    `;

    }
    """




intro_html = """

<div class="content-box">
    <p>Spoken language communicates more than just words. Speakers use tone, pitch, and other nonverbal cues to express emotions. In emotional speech, these cues can strengthen or even contradict the meaning of the words—for example, irony can make a positive phrase sound sarcastic. For this research, we will focus on three basic emotions plus neutral:</p>

    <ul>
    <li><h4>Anger</h4></li>
    <li><h4>Happiness</h4></li>
    <li><h4>Sadness</h4></li>
    <li><h4>Neutral</h4></li>
    </ul>

    <p>This may seem like a small set, but it's a great starting point for analyzing emotions in such a large collection— <strong>303 hours of interviews! (That’s 13 days of nonstop listening! &#128558)</strong> </p>
</div>

<h2>The ACT UP Oral History Project</h2>

<div class="content-box">
    <p>You will be annotating short audio clips extracted from the ACT UP (AIDS Coalition to Unleash Power) Oral History Project, developed by Sarah Schulman and Jim Hubbard. The archive comprises 187 interviews with people who were part of ACT UP in New York during the AIDS epidemic of the late 1980s and early 1990s. In each video, the subjects talk about their lives before the epidemic, how they were affected by AIDS, and their work in ACT UP.
    </p><p>Schulman sought to document the group’s public activism and capture the atmosphere among its members at the height of the crisis: </p><h4>"I wanted to show how crazy and desperate everyone was at that point, organizing political funerals and riding around in vans with the bodies of their dead friends. I wanted to convey what the suffering was like at that point."</h4><p> 
    Schulman describes the archive as a space that embodies challenging emotions, such as the pervasive fear of death, grief, and what Jim Hubbard refers to as the activists' "righteous anger." </p>
</div>

<h2>What will you be annotating?</h2>

<div class="content-box">
    <p>You will annotate one emotion per short audio clip, based on the following criteria:</p>

    <ul>
    <li>
        <h4>Predominant Emotion:</h4> The emotion expressed with the highest intensity. Emotions can be complex, and multiple emotions may occur at the same time.
    </li>
    
    <li>
        <h4>Perceived Emotion at the Time of Recording:</h4> In Oral History Archives, interviewees discuss their past. However, you should annotate the emotion they appear to feel at the time of recording, NOT what they felt during the event they describe.
    </li>
    
    <li>
        <h4>Speech Emotionality:</h4> Focus on how something is said rather than what is said. For example, if a friend recounts an awful day with humor, the content may be sad, but the delivery is joyful. In this case, linguistic emotionality (content) would be classified as sad, while paralinguistic emotionality (tone and delivery) would be classified as joyful.
    </li>
    </ul>

    <p>Further, you will be asked to answer <strong>"How confident are you that the annotated emotion is present in the recording?"</strong> on a scale from 0 to 100%, where 0 means "not at all confident" and 100 means "certain, completely confident". There will be a <strong>"Comment/Feedback"</strong> section where you can make notes. Below the audio, there will be an option to view the transcribed sentence. Please use this only if you are struggling to understand the audio.</p>
</div>
"""

examples_explanation = """<h3>Audio examples</h3>
    <div class="content-box">
        <p>Let's listen to examples of the four emotions you will annotate. Note that all of these examples use the same sentence and are acted out, which makes the emotionality in the speech more apparent. In a real-world setting, emotionality is more complex, so you will find a list of additional emotions within each of the three emotion categories (Happy, Sad, and Angry) to assist you during annotation.</p>
    </div>"""
side_bar_html = """
<h3>The task</h3>
<div class="content-box">
    <p>You will annotate one emotion per short audio clip, based on the following criteria:</p>
    <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
        <span>&#9989;</span>
        <h4 style="margin: 0;">Predominant Emotion</h4>
    </div>

    <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
        <span>&#9989;</span>
        <h4 style="margin: 0;">Perceived Emotion at the Time of Recording</h4>
    </div>
    <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
        <span>&#9989;</span>
        <h4 style="margin: 0;">Speech Emotionality</h4>
    </div>
    
</div>
<h3>Major subclasses</h3>

<div class="content-box">
    <table border="1">
        <thead>
            <tr>
                <th>Emotion Label</th>
                <th>Major Subclasses</th>
            </tr>
        </thead>
        <tbody>
            <tr>
                <td>Happiness</td>
                <td>Affection, Goodwill, Joy, Satisfaction, Zest, Acceptance, Pride, Hope, Excitement, Relief, Passion, Caring</td>
            </tr>
            <tr>
                <td>Sadness</td>
                <td>Suffering, Regret, Displeasure, Embarrassment, Sympathy, Depression</td>
            </tr>
            <tr>
                <td>Anger</td>
                <td>Irritability, Torment, Jealousy, Disgust, Rage, Frustration</td>
            </tr>
        </tbody>
    </table>
</div>
"""

persistent_storage = Path('/data')
# List of all audio files to annotate
file_list = pd.read_excel('combined_annotations.xlsx')
total_annotations = len(file_list)
# Initialize an empty DataFrame to store annotations
annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'confidence', 'comments'])
current_index = {"index": 0}  # Dictionary to allow modifying inside functions
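
# Assumed layout of combined_annotations.xlsx, inferred from the column
# lookups below (the rows here are made-up examples):
#
#   SAMPLE ID        | SENTENCE
#   speaker01-0001   | "We met at the first demonstration."
#   speaker01-0002   | "Nobody knew what was coming."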


def load_example(index):
    """Load the example in row #index of file_list.
    If the example was already annotated, return the stored values so the
    widgets are pre-filled with the previous annotation."""

    row = file_list.iloc[index]
    audio_path = os.path.join('files_to_annotate_padded_smaller_emotion_set', row["SAMPLE ID"].split('-')[0], row["SAMPLE ID"] + '.wav')
    sentence = row["SENTENCE"]

    # Look up any previous annotation by sample id (annotations are appended in
    # completion order, so positional indexing would not line up after the user
    # moves back and forth between examples)
    match = annotations[annotations["sample_id"] == row["SAMPLE ID"]]
    previous_annotation = (
        match.iloc[0].to_dict() if not match.empty
        else {"sample_id": row["SAMPLE ID"], "emotion": 'Blank', "confidence": 0, "comments": ''}
    )
    return (sentence, audio_path, previous_annotation['emotion'], previous_annotation['confidence'], current_index['index'] + 1, previous_annotation["comments"])
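
# Directory layout assumed by load_example (the sample id below is hypothetical):
#   SAMPLE ID "speaker01-0042" resolves to
#   files_to_annotate_padded_smaller_emotion_set/speaker01/speaker01-0042.wav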


def save_annotation(emotions, confidence, comments, participant_id):
    """Save the annotation for the current example."""

    idx = current_index["index"]
    row = file_list.iloc[idx]
    sample_id = row["SAMPLE ID"]
    sentence = row["SENTENCE"]

    # Update the existing row if this sample was annotated before; otherwise
    # append a new row and bump the completion counter
    if sample_id in annotations["sample_id"].values:
        annotations.loc[annotations["sample_id"] == sample_id, ["emotion", "confidence", "comments"]] = \
            [emotions, confidence, comments]
    else:
        annotations.loc[len(annotations)] = [sample_id, sentence, emotions, confidence, comments]
        ann_completed.value += 1
    # Persist after every annotation so nothing is lost if the session dies
    annotations.to_csv(persistent_storage / f"{participant_id}_annotations.csv", index=False)


def next_example(emotions, confidence, comments, participant_id):
    """Save the current annotation and move to the next example."""
    if emotions == "Blank":
        gr.Warning("Please fill out the emotion section. 'Blank' is not a valid emotion.")
    else:
        save_annotation(emotions, confidence, comments, participant_id)
        if current_index["index"] < len(file_list) - 1:
            current_index["index"] += 1
    return load_example(current_index["index"])

def previous_example(emotion, confidence, comments, participant_id):
    """Save the current annotation (unless blank) and move to the previous example."""
    # `emotion` is the radio's value (a string), not the component itself
    if emotion != "Blank":
        save_annotation(emotion, confidence, comments, participant_id)
    if current_index["index"] > 0:
        current_index["index"] -= 1
    # Always return so the output components are refreshed
    return load_example(current_index["index"])

def deactivate_participant_id(participant_id, lets_go):
    """Lock the participant ID and the start button once annotation begins.
    Returning new component instances tells Gradio to update the existing ones in place."""
    participant_id = gr.Textbox(label='What is your participant ID?', value=participant_id, interactive=False)
    lets_go = gr.Button("Participant selected!", interactive=False)
    return participant_id, lets_go

def activate_elements(emotions, confidence, comments, next_button, previous_button):
    """Reveal the annotation widgets once a participant ID has been entered."""
    emotions = gr.Radio(["Blank", "Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion", value="Blank", visible=True)
    confidence = gr.Slider(label="How confident are you that the annotated emotion is present in the recording? (%)", minimum=0, maximum=100, step=10, visible=True)
    comments = gr.Textbox(label="Comments", visible=True)
    previous_button = gr.Button("Previous Example", visible=True)
    next_button = gr.Button("Next Example", visible=True)
    return emotions, confidence, comments, next_button, previous_button

# ===================
# Gradio Interface
# ===================


with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
    # Instructions for emotion annotation
    with gr.Sidebar():
        participant_id = gr.Textbox(label='What is your participant ID?', interactive = True)
        lets_go = gr.Button("Let's go!")
        cheat_sheet = gr.HTML(side_bar_html, padding = False)
        #happy_words = gr.Textbox(label = "Happy")

    with gr.Tab("Instructions", elem_id = 'instructions'):
        instructions = gr.HTML(intro_html, padding = False)
        
        with gr.Group():  # "Audio examples" group (a nested gr.Blocks is not a valid container here)
            description = gr.HTML(examples_explanation, padding = False)

            with gr.Accordion(label = "Neutral", open= False):
                neutral_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/neutral.wav', label = "Neutral")
            
            with gr.Accordion(label = "Happy",  open = False):
                happy_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/happy_low.wav', label = "Happy (Low Intensity)")
                happy_int_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/happy_intense.wav', label = "Happy (High Intensity)")
            
            with gr.Accordion(label = "Sad",  open = False):
                sad_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/sad_low.wav', label = "Sad (Low Intensity)")
                sad_int_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/sad_intense.wav', label = "Sad (High Intensity)")

            with gr.Accordion(label = "Anger",  open = False):
                angry_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/angry_low.wav', label = "Anger (Low Intensity)")
                angry_int_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/angry_intense.wav', label = "Anger (High Intensity)")

            agreement = gr.Checkbox(value = False, label = "I agree", info = "I agree to have my annotations, comments, and questionnaire answers used for research purposes. I understand that any personal information will be anonymized.", interactive = True)
            


    with gr.Tab("Annotation Interface"):

        ann_completed = gr.Number(0, visible=False)
        total = gr.Number(total_annotations, visible=False)
        
        # Row with progress bar
        gr.HTML("""
        <div id="myProgress">
        <div id="myBar">
        <span id="progressText">Press "Let's go!" to start</span> 
        </div>
        </div>
        """, padding = False)

        # Row with audio player
        with gr.Row():
            audio_player = gr.Audio(value='test.mp3', label="Audio", type="filepath", interactive=False, show_download_button=False, show_share_button=False)  # placeholder clip; replaced when "Let's go!" is pressed

        # Hidden row with corresponding sentence
        with gr.Row():
            accordion = gr.Accordion(label="Click to see the sentence", open=False)
            with accordion:
                sentence_text = gr.Textbox(label="Transcription", interactive=False, value = 'This is a sentence.')
        # Row for emotion annotation and confidence
        with gr.Row():        
            emotions = gr.Radio(["Blank", "Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion", value = "Blank", visible = False)

        with gr.Row():    
            confidence = gr.Slider(label="Confidence (%)", minimum=0, maximum=100, step=10, visible = False)

        with gr.Row():
            # Comment section
            comments = gr.Textbox(label="Comments", visible =False)
            
        # Next and Previous Buttons
        with gr.Row():
            previous_button = gr.Button("Previous Example", visible = False)
            next_button = gr.Button("Next Example", visible = False)

        # Go back
        previous_button.click(
            previous_example,
            inputs=[emotions, confidence, comments, participant_id],
            outputs=[sentence_text, audio_player, emotions, confidence, ann_completed, comments],
        )

        # Go to the next example
        next_button.click(
            next_example,
            inputs=[emotions, confidence, comments, participant_id],
            outputs=[sentence_text, audio_player, emotions, confidence, ann_completed, comments],
        )
        # Update the progress bar; with fn=None, only the JS snippet runs in the browser
        next_button.click(None, [], [ann_completed, total], js=js_progress_bar)
        
        
        # Start button: draw the progress bar, lock the participant ID, reveal
        # the annotation widgets, and load the first example
        lets_go.click(None, [], [ann_completed, total], js=js_progress_bar)
        lets_go.click(deactivate_participant_id, [participant_id, lets_go], [participant_id, lets_go])
        lets_go.click(activate_elements, [emotions, confidence, comments, next_button, previous_button], [emotions, confidence, comments, next_button, previous_button])
        lets_go.click(load_example, inputs=[gr.Number(current_index["index"], visible=False)], outputs=[sentence_text, audio_player, emotions, confidence, ann_completed, comments])

        
demo.launch()