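# Gradio app for annotating emotions (Joy, Sad, Angry, Neutral) in short audio
# clips from the ACT UP Oral History Project. Each participant ID is assigned a
# group of clips; annotations are saved per participant as CSV files in the
# Space's persistent storage (/data).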
import os
from pathlib import Path

import gradio as gr
import pandas as pd
from mutagen.mp3 import MP3
from mutagen.wave import WAVE
css = """#myProgress {
  width: 100%;
  background-color: var(--block-border-color);
  border-radius: 2px;
}

  #myBar {
    width: 0%;
    height: 30px;
    background-color: var(--block-title-background-fill);
    border-radius: 2px;
  } 

  #progressText {
    position: absolute;
    top: 50%;
    left: 50%;
    transform: translate(-50%, -50%); 
    color: var(--block-title-text-color); 
    font-weight: normal;
    font-size: 14px;
  }

  h1, h2, h3, h4 {
    padding: var(--block-title-padding);
    color: var(--block-title-text-color);
    border: solid var(--block-title-border-width) var(--block-title-border-color);
    border-radius: var(--block-title-radius);
    background: var(--block-title-background-fill);
    width: fit-content;
    display: inline-block;
  }

  h4 {
    margin: 0px;
    color: var(--block-title-background-fill);
    background: var(--block-title-text-color);
  }

  #instructions {
    max-width: 980px;
    align-self: center;
  }

  .content-box {
    border-color: var(--block-border-color);
    border-radius: var(--block-radius);
    background: var(--block-background-fill);
    padding: var(--block-label-padding);
  }


"""



js_progress_bar = """
    function move(start, end, total_duration, current_index, n_ann, total_ann) {

        var elem = document.getElementById("myBar");
        elem.style.width = n_ann/total_ann * 100 + "%";
        document.getElementById("progressText").innerText = `${current_index} / ${total_ann} (Completed: ${n_ann})`;
        
        const waveform = document.querySelector('#audio_to_annotate #waveform div');
        const shadowRoot = waveform.shadowRoot;
        const canvases = shadowRoot.querySelector('.wrapper');

        console.log(canvases.offsetWidth)

        const leftOffsetPct = start / total_duration;
        const widthPct = (end - start) / total_duration;
        
        // Get CSS variable for background color
        const blockColor = getComputedStyle(document.documentElement)
            .getPropertyValue('--block-title-background-fill')
            .trim() || 'red'; // Default to red if variable is not found

        // Reuse a single <style> element so repeated calls don't pile up in the shadow DOM
        let style = shadowRoot.getElementById('annotation-region-style');
        if (!style) {
            style = document.createElement('style');
            style.id = 'annotation-region-style';
            shadowRoot.appendChild(style);
        }
        style.textContent = `
        .wrapper::after {
            content: '';
            position: absolute;
            top: 0;
            left: ${canvases.offsetWidth * leftOffsetPct}px;
            width: ${canvases.offsetWidth * widthPct}px;
            height: 100%;
            background-color: ${blockColor};
            z-index: 999;
            opacity: 0.5;
        }

        /* Ensure parent has positioning context */
        .wrapper {
            position: relative;
        }
        `;

        console.log(start + ' ' + end + ' ' + total_duration);
    }
    """




intro_html = """

<div class="content-box">
    <p>Spoken language communicates more than just words. Speakers use tone, pitch, and other nonverbal cues to express emotions. In emotional speech, these cues can strengthen or even contradict the meaning of the words—for example, irony can make a positive phrase sound sarcastic. For this research, we will focus on three basic emotions plus neutral:</p>

    <ul>
    <li><h4>Anger</h4></li>
    <li><h4>Happiness</h4></li>
    <li><h4>Sadness</h4></li>
    <li><h4>Neutral</h4></li>
    </ul>

    <p>This may seem like a small set, but it's a great starting point for analyzing emotions in such a large collection: <strong>303 hours of interviews! (That’s 13 days of nonstop listening! &#128558;)</strong></p>
</div>

<h2>The ACT-UP Oral History Project</h2>

<div class="content-box">
    <p>You will be annotating short audio clips extracted from the ACT UP (AIDS Coalition to Unleash Power) Oral History Project, developed by Sarah Schulman and Jim Hubbard. 
    This archive features interviews with individuals who were part of ACT UP during the late 1980s and early 1990s, amidst the AIDS epidemic. 
    In each video, the subjects talk about their lives before the epidemic, how they were affected by AIDS, and their work in ACT UP.</p>
</div>

<h2>What will you be annotating?</h2>

<div class="content-box">
    <p>You will annotate one emotion per short audio clip, based on the following criteria:</p>

    <ul>
    <li>
        <h4>Predominant Emotion:</h4> The emotion expressed with the highest intensity. Emotions can be complex, and multiple emotions may occur at the same time.
    </li>
    
    <li>
        <h4>Perceived Emotion at the Time of Recording:</h4> In Oral History Archives, interviewees discuss their past. However, you should annotate the emotion they appear to feel at the time of recording, NOT what they felt during the event they describe.
    </li>
    
    <li>
        <h4>Speech Emotionality:</h4> Focus on how something is said rather than what is said. For example, if a friend recounts an awful day with humor, the content may be sad, but the delivery is joyful. In this case, linguistic emotionality (content) would be classified as sad, while paralinguistic emotionality (tone and delivery) would be classified as joyful.
    </li>
    </ul>

    <p>Further, you will be asked to answer <strong>"How confident are you that the annotated emotion is present in the recording?"</strong> on a scale from 0 to 10, with 0 being "not at all confident" and 10 being "certain, completely confident". There will be a <strong>"Comment/Feedback"</strong> section where you can make notes. Below the audio, there will be an option to view the transcribed sentence. Please use this only if you are struggling to understand the audio.</p>
</div>
"""

examples_explanation = """<h3>Audio examples</h3>
    <div class="content-box">
        <p>Let's check out examples for the four emotions to annotate. Note that all these examples use the same sentence and are acted out, making the emotionality in speech more apparent. In a real-world setting, emotionality is more complex, so you will find a list of additional emotions within each of the three emotion categories (Happy, Sad, and Angry) to assist you during annotation.</p>
    </div>"""
side_bar_html = """
<h3>The task</h3>
<div class="content-box">
    <p>You will annotate one emotion per short audio clip, based on the following criteria:</p>
    <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
        <span>&#9989;</span>
        <h4 style="margin: 0;">Predominant Emotion</h4>
    </div>

    <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
        <span>&#9989;</span>
        <h4 style="margin: 0;">Perceived Emotion at the Time of Recording</h4>
    </div>
    <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
        <span>&#9989;</span>
        <h4 style="margin: 0;">Speech Emotionality</h4>
    </div>
    
</div>
<h3>Major subclasses</h3>

<div class="content-box">
    <table border="1">
        <thead>
            <tr>
                <th>Emotion Label</th>
                <th>Major Subclasses</th>
            </tr>
        </thead>
        <tbody>
            <tr>
                <td>Happiness</td>
                <td>Affection, Goodwill, Joy, Satisfaction, Zest, Acceptance, Pride, Hope, Excitement, Relief, Passion, Caring</td>
            </tr>
            <tr>
                <td>Sadness</td>
                <td>Suffering, Regret, Displeasure, Embarrassment, Sympathy, Depression</td>
            </tr>
            <tr>
                <td>Anger</td>
                <td>Irritability, Torment, Jealousy, Disgust, Rage, Frustration</td>
            </tr>
        </tbody>
    </table>
</div>
"""
file_list = None  # per-participant clip list; loaded in deactivate_participant_id()
persistent_storage = Path('/data')

password_files = os.getenv("password_files")

possible_ids = {'Tiger-001': 0, 'Panda-002': 0, 
                'Falcon-003': 1, 'Wolf-004': 1,
                'Dolphin-005': 2, 'Eagle-006': 2,
                'Jaguar-007': 3, 'Rhino-008': 3,
                'Zebra-009': 4, 'Lion-010': 4,
                'Cheetah-011': 5, 'Bear-012': 5}
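# Each ID maps to an annotation group; the two IDs that share a number annotate
# the same group file (group_<n>.csv), presumably so ratings can be compared
# between the pair.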



def get_audio_duration(file_path):
    if file_path.lower().endswith('.mp3'):
        audio = MP3(file_path)
    elif file_path.lower().endswith(('.wav', '.wave')):
        audio = WAVE(file_path)
    else:
        raise ValueError("Unsupported file format")
    
    return audio.info.length  # Duration in seconds
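# Usage sketch (hypothetical path):
#   get_audio_duration(f"{persistent_storage}/files_to_annotate_2round/XYZ/XYZ-0001.wav")  # -> e.g. 4.73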

def get_storage(password):
    """List the annotation CSVs in persistent storage and report total usage."""
    # source: https://discuss.huggingface.co/t/accessing-data-folder-of-persistent-storage/46681/2
    if password != password_files:
        gr.Warning("Please provide the correct password")
        return None, None
    files = [
        {
            "orig_name": file.name,
            "name": file.resolve(),
            "size": file.stat().st_size,
            "data": None,
            "is_file": True,
        }
        for file in persistent_storage.glob("**/*.csv")
        if file.is_file()
    ]
    usage = sum(f["size"] for f in files)
    return files, f"{usage / (1024.0 ** 3):.3f}GB"


def load_first_example(participant_id, ann_completed, current_index):
    """Load the first pending example and update the index.
    If the participant already has saved annotations, resume from where they left off."""
    global annotations

    path_ann = f'{persistent_storage}/{participant_id}_annotations.csv'
    if os.path.exists(path_ann):
        annotations = pd.read_csv(path_ann, keep_default_na=False)
        current_index = len(annotations)
        ann_completed = gr.Number(len(annotations) - 1, visible=False)
    return *load_example(current_index), ann_completed, current_index
    
def load_example(index):
    """Load the example in row `index` of file_list.
    If the row was already annotated, the saved values are returned so the form is pre-filled."""
    
    row = file_list.iloc[index]
    audio_path = os.path.join(persistent_storage, 'files_to_annotate_2round', row["sample_id"].split('-')[0], row["sample_id"] + '.wav')
    sentence = row["sentence"]

    # If the user already made an annotation for this example, gradio will return said annotation
    previous_annotation = (
        annotations.iloc[index].to_dict() if index < len(annotations) else {"sample_id": row["sample_id"], "emotion": 'Blank', "confidence": 'Blank',
                                                                            "comments": '', "n_clicks": 0}
    )

    start = row['start']
    end = row['end']
    duration = get_audio_duration(audio_path)
    return (sentence, audio_path, previous_annotation['emotion'], previous_annotation['confidence'],
            previous_annotation['comments'], previous_annotation['n_clicks'], start, end, duration)


def save_annotation(emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index):
    """Save the annotation for the current example."""

    row = file_list.iloc[current_index]
    sample_id = row["sample_id"]
    sentence = row["sentence"]

    # Update or append annotation
    if sample_id in annotations["sample_id"].values:
        annotations.loc[annotations["sample_id"] == sample_id, ["emotion", "confidence", "comments", "n_clicks"]] = \
            [emotions, confidence, comments, n_clicks]
    else:
        annotations.loc[len(annotations)] = [sample_id, sentence, emotions, confidence, comments, n_clicks]
        ann_completed = gr.Number(ann_completed + 1, visible=False)
    annotations.to_csv(f"{persistent_storage}/{participant_id}_annotations.csv", index=False)  # Save to a CSV file
    
    return ann_completed
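# The per-participant CSV has one row per annotated clip, e.g. (illustrative values):
#   sample_id,sentence,emotion,confidence,comments,n_clicks
#   XYZ-0001,"This is a sentence.",Joy,Very confident,,2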

def next_example(emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index):
    """Move to the next example."""

    if emotions == "Blank":
        gr.Warning("Please fill out the emotion section. 'Blank' is not a valid emotion.")
    elif confidence == "Blank":
        gr.Warning("Please fill out the confidence section. 'Blank' is not a valid input.")

    else:  
        ann_completed = save_annotation(emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index)
        if current_index < len(file_list) - 1:
            current_index += 1
    return *load_example(current_index), ann_completed, current_index

def previous_example(emotion, confidence, comments, n_clicks, participant_id,  ann_completed, current_index):
    """Move to the previous example."""
  
    if emotion != "Blank":
        ann_completed = save_annotation(emotion, confidence, comments, n_clicks, participant_id,  ann_completed, current_index)
    if current_index > 0:
        current_index -= 1
            
    return *load_example(current_index), ann_completed, current_index

def deactivate_participant_id(participant_id, lets_go, total, previous_button, next_button, sentence_text, audio_player, emotions, confidence, comments, n_clicks, ann_completed, current_index):
    """Validate the participant ID, load their group file, and reveal the annotation widgets."""
    global file_list
    global total_annotations
    start, end, duration = 0, 0, 0  # defaults in case the ID is invalid
    if participant_id in possible_ids.keys():
        file_list = pd.read_csv(os.path.join(persistent_storage, 'files_to_annotate_2round', f'group_{possible_ids[participant_id]}.csv'), keep_default_na=False)

        
        
        total_annotations = len(file_list)
        total = gr.Number(total_annotations, visible=False)
        

        sentence, audio_player, emotions, confidence, comments, n_clicks, start, end, duration, ann_completed, current_index = load_first_example(participant_id, ann_completed, current_index)
        participant_id = gr.Textbox(label='What is your participant ID?', value = participant_id, interactive = False)
        lets_go = gr.Button("Participant selected!", interactive = False)
        
        sentence_text = gr.Textbox(label="Transcription", interactive=False, value = sentence)
        emotions = gr.Radio(["Blank", "Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion", value =  emotions, visible = True)
        confidence = gr.Radio(["Blank","Very Uncertain", "Somewhat Uncertain", "Neutral", "Somewhat confident", "Very confident"], label="How confident are you that the annotated emotion is present in the recording?", visible = True, value = confidence)
        comments = gr.Textbox(label="Comments", visible =True, value = comments)
        previous_button = gr.Button("Previous Example", visible = True)
        next_button = gr.Button("Next Example",visible = True)

    else:
        gr.Warning("Please enter a valid participant ID")
    return participant_id, lets_go, total, previous_button, next_button, sentence_text, audio_player, emotions, confidence, comments, n_clicks, start, end, duration, ann_completed, current_index

def count_clicks(n_clicks):
    """Increment the play counter each time the annotator plays the audio."""
    return gr.Number(n_clicks + 1, visible=False)

# ===================
# Gradio Interface
# ===================


with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
    # List of all audio files to annotate


    # Initialize an empty DataFrame to store annotations
    annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'confidence', 'comments', 'n_clicks'])

    # Instructions for emotion annotation
    with gr.Sidebar():
        participant_id = gr.Textbox(label='What is your participant ID?', interactive = True)
        lets_go = gr.Button("Let's go!")
        cheat_sheet = gr.HTML(side_bar_html, padding = False)
        #happy_words = gr.Textbox(label = "Happy")

    with gr.Tab("Instructions", elem_id = 'instructions'):
        instructions = gr.HTML(intro_html, padding = False)
        
        with gr.Blocks():
            description = gr.HTML(examples_explanation, padding = False)

            with gr.Accordion(label = "Neutral", open= False):
                neutral_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/neutral.wav', label = "Neutral")
            
            with gr.Accordion(label = "Happy",  open = False):
                happy_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/happy_low.wav', label = "Happy (Low Intensity)")
                happy_int_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/happy_intense.wav', label = "Happy (High Intensity)")
            
            with gr.Accordion(label = "Sad",  open = False):
                sad_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/sad_low.wav', label = "Sad (Low Intensity)")
                sad_int_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/sad_intense.wav', label = "Sad (High Intensity)")

            with gr.Accordion(label = "Anger",  open = False):
                angry_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/angry_low.wav', label = "Anger (Low Intensity)")
                angry_int_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/angry_intense.wav', label = "Anger (High Intensity)")

            #agreement = gr.Checkbox(value = False, label = "I agree", info = "I agree to have my annotations, comments, and questionnaire answers used for research purposes. I understand that any personal information will be anonymized.", interactive = True)

    
    with gr.Tab("Annotation Interface"):
        ann_completed = gr.Number(0, visible=False)
        total = gr.Number(0, visible=False)
        current_index = gr.Number(0, visible = False)
        start = gr.Number(0, visible = False)
        end = gr.Number(0, visible = False)
        duration = gr.Number(0, visible = False)
        n_clicks = gr.Number(0, visible = False)

        # Row with progress bar
        
        gr.HTML("""
        <div id="myProgress">
        <div id="myBar">
        <span id="progressText">Press "Let's go!" to start</span> 
        </div>
        </div>
        """, padding = False)
    
        # Row with audio player
        with gr.Row():
            audio_player = gr.Audio(value= 'blank.mp3', label="Audio", type="filepath", interactive=False, show_download_button = False, show_share_button = False, elem_id = "audio_to_annotate")

        # Hidden row with corresponding sentence
        with gr.Row():
            accordion = gr.Accordion(label="Click to see the sentence", open=False)
            with accordion:
                sentence_text = gr.Textbox(label="Transcription", interactive=False, value = 'This is a sentence.')
        # Row for emotion annotation and confidence
        with gr.Row():        
            emotions = gr.Radio(["Blank", "Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion", value = "Blank", visible = False)

        with gr.Row():    
            confidence = gr.Radio(["Blank","Very Uncertain", "Somewhat Uncertain", "Neutral", "Somewhat confident", "Very confident"], label="How confident are you that the annotated emotion is present in the recording?", visible = False)

        with gr.Row():
            # Comment section
            comments = gr.Textbox(label="Comments", visible =False)
            
        # Next and Previous Buttons
        with gr.Row():
            previous_button = gr.Button("Previous Example", visible = False)
            next_button = gr.Button("Next Example", visible = False)

        # Go back
        previous_button.click(
            previous_example,
            inputs=[emotions, confidence, comments, n_clicks, participant_id,  ann_completed, current_index],
            outputs=[sentence_text, audio_player, emotions, confidence, comments, n_clicks, start, end, duration, ann_completed, current_index],).then(None, [], [start, end, duration, current_index,ann_completed, total], js = js_progress_bar)

        # Go to the next example
        next_button.click(
            next_example,
            inputs=[emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index],
            outputs=[sentence_text, audio_player, emotions, confidence, comments, n_clicks, start, end, duration, ann_completed, current_index],).then(None, [], [start, end, duration, current_index,ann_completed, total], js = js_progress_bar)
        
        buttons = [previous_button, next_button]
        data = [sentence_text, audio_player, emotions, confidence, comments]
        lets_go.click(deactivate_participant_id, [participant_id, lets_go, total, *buttons, *data, n_clicks, ann_completed, current_index], [participant_id, lets_go, total, *buttons, *data, n_clicks, start, end, duration, ann_completed, current_index]).then( None, [], [start, end, duration, current_index, ann_completed, total], js = js_progress_bar)
        audio_player.play(count_clicks, [n_clicks], [n_clicks])
    
    with gr.Tab("Access Files"):
        with gr.Row():
            with gr.Column():
                password = gr.Textbox(label='Password', interactive = True)
                get_files_button = gr.Button("Get Files")

            with gr.Column():
                files = gr.Files(label="Files")
                storage = gr.Text(label="Total Usage")
        get_files_button.click(get_storage, inputs= [password], outputs=[files, storage], postprocess=False)

        
demo.launch(allowed_paths = ['/data/files_to_annotate_2round'])