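"""Gradio annotation app for labelling perceived emotion (anger, happiness,
sadness, neutral) in short audio clips from the ACT UP Oral History Project."""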
import os

import gradio as gr
import pandas as pd
css = """#myProgress {
width: 100%;
background-color: var(--block-border-color);
border-radius: 2px;
}
#myBar {
width: 0%;
height: 30px;
background-color: var(--block-title-background-fill);
border-radius: 2px;
}
#progressText {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
color: var(--block-title-text-color);
    font-weight: normal;
font-size: 14px;
}
h1, h2, h3, h4 {
padding: var(--block-title-padding);
color: var(--block-title-text-color);
border: solid var(--block-title-border-width) var(--block-title-border-color);
border-radius: var(--block-title-radius);
background: var(--block-title-background-fill);
width: fit-content;
display: inline-block;
}
h4 {
margin: 0px;
color: var(--primary-800);
}
#instructions {
max-width: 980px;
align-self: center;
}
.content-box {
border-color: var(--block-border-color);
border-radius: var(--block-radius);
background: var(--block-background-fill);
padding: var(--block-label-padding);
}
"""
js_progress_bar = """
function move(n_ann, total_ann) {
    var bar = document.getElementById("myBar");
    var progressText = document.getElementById("progressText");
    bar.style.width = (n_ann / total_ann) * 100 + "%";
    progressText.innerText = "Completed: " + n_ann + " / " + total_ann;
}
"""
intro_html = """
<h1>Emotionality in Speech</h1>
<div class="content-box">
<p>Spoken language communicates more than just words. Speakers use tone, pitch, and other nonverbal cues to express emotions. In emotional speech, these cues can strengthen or even contradict the meaning of the words—for example, irony can make a positive phrase sound sarcastic. For this research, we will focus on three basic emotions plus neutral:</p>
<ul>
<li><h4>Anger</h4></li>
<li><h4>Happiness</h4></li>
<li><h4>Sadness</h4></li>
<li><h4>Neutral</h4></li>
</ul>
<p>This may seem like a small set, but it's a great starting point for analyzing emotions in such a large collection—303 hours of interviews! (That’s 13 days of nonstop listening!)</p>
</div>
<h2>What is the archive you will be annotating?</h2>
<div class="content-box">
<p>You will be annotating short audio clips extracted from the ACT UP (AIDS Coalition to Unleash Power) Oral History Project. This archive features interviews with individuals who were part of ACT UP during the late 1980s and early 1990s, amidst the AIDS epidemic.</p>
<p>In each video, the subjects talk about their lives before the epidemic, how they were affected by AIDS, and their work in ACT UP.</p>
</div>
<h2>What will you be annotating?</h2>
<div class="content-box">
<p>You will annotate one emotion per short audio clip, based on the following criteria:</p>
<ul>
<li>
<strong>Predominant Emotion:</strong> The emotion expressed with the highest intensity. Emotions can be complex, and multiple emotions may occur at the same time.
</li>
<li>
<strong>Perceived Emotion at the Time of Recording:</strong> In Oral History Archives, interviewees discuss their past. However, you should annotate the emotion they appear to feel at the time of recording, NOT what they felt during the event they describe.
</li>
<li>
<strong>Speech Emotionality:</strong> Focus on how something is said rather than what is said. For example, if a friend recounts an awful day with humor, the content may be sad, but the delivery is joyful. In this case, linguistic emotionality (content) would be classified as sad, while paralinguistic emotionality (tone and delivery) would be classified as joyful.
</li>
</ul>
<p>You will also be asked to rate "How confident are you that the annotated emotion is present in the recording?" on a scale from 0 to 10, where 0 means "not at all confident" and 10 means "certain, completely confident".</p>
<p>There will be a "Comment/Feedback" section where you can make notes.<br>Below the audio, there is an option to view the transcribed sentence. Please use this only if you are struggling to understand the audio.</p>
<p>Let’s explore the four possible categories and listen to some examples!</p>
</div>
"""
# List of all audio files to annotate
file_list = pd.read_excel('combined_annotations.xlsx')
total_annotations = len(file_list)
# Initialize an empty DataFrame to store annotations
annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'confidence', 'comments'])
current_index = {"index": 0} # Dictionary to allow modifying inside functions
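# NOTE: module-level state (current_index, annotations) is shared across all
# sessions of a running Gradio app, so this script assumes a single annotator
# per running instance.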
def load_example(index):
    """Load the example in row `index` of file_list.

    If this example was annotated before, the stored values are returned so the
    annotation widgets can be restored."""
row = file_list.iloc[index]
audio_path = os.path.join('files_to_annotate_padded_smaller_emotion_set', row["SAMPLE ID"].split('-')[0], row["SAMPLE ID"] + '.wav')
sentence = row["SENTENCE"]
    # If the user has already annotated this example, restore that annotation;
    # otherwise fall back to blank defaults.
    previous_annotation = (
        annotations.iloc[index].to_dict()
        if index < len(annotations)
        else {"sample_id": row["SAMPLE ID"], "emotion": "Blank", "confidence": 0, "comments": ""}
    )
    return (sentence, audio_path, previous_annotation["emotion"], previous_annotation["confidence"], index + 1, previous_annotation["comments"])
def save_annotation(emotions, confidence, comments, participant_id):
"""Save the annotation for the current example."""
idx = current_index["index"]
row = file_list.iloc[idx]
sample_id = row["SAMPLE ID"]
sentence = row["SENTENCE"]
# Update or append annotation
if sample_id in annotations["sample_id"].values:
annotations.loc[annotations["sample_id"] == sample_id, ["emotion", "confidence", "comments"]] = \
[emotions, confidence, comments]
else:
annotations.loc[len(annotations)] = [sample_id, sentence, emotions, confidence, comments]
ann_completed.value += 1
annotations.to_csv(f"{participant_id}_annotations.csv", index=False) # Save to a CSV file
def next_example(emotions, confidence, comments, participant_id):
    """Save the current annotation and move to the next example."""
    if emotions == "Blank":
        # Do not save or advance until an emotion has been selected.
        gr.Warning("Please fill out the emotion section")
    else:
        save_annotation(emotions, confidence, comments, participant_id)
        if current_index["index"] < len(file_list) - 1:
            current_index["index"] += 1
    return load_example(current_index["index"])
def previous_example(emotion, confidence, comments, participant_id):
    """Move to the previous example, saving the current annotation first if one was made."""
    # `emotion` is the radio button's string value, not the component itself.
    if emotion != "Blank":
        save_annotation(emotion, confidence, comments, participant_id)
    if current_index["index"] > 0:
        current_index["index"] -= 1
    return load_example(current_index["index"])
def deactivate_participant_id(participant_id, lets_go):
    """Lock the participant ID textbox and its button once annotation starts."""
    participant_id = gr.Textbox(label='What is your participant ID?', value=participant_id, interactive=False)
    lets_go = gr.Button("Participant selected!", interactive=False)
    return participant_id, lets_go
def activate_elements(emotions, confidence, comments, next_button, previous_button):
    """Reveal the annotation widgets once a participant ID has been confirmed."""
    emotions = gr.Radio(["Blank", "Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion", value="Blank", visible=True)
    confidence = gr.Slider(label="Confidence (%)", minimum=0, maximum=100, step=10, visible=True)
    comments = gr.Textbox(label="Comments", visible=True)
    previous_button = gr.Button("Previous Example", visible=True)
    next_button = gr.Button("Next Example", visible=True)
    return emotions, confidence, comments, next_button, previous_button
# ===================
# Gradio Interface
# ===================
with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
    # Sidebar for participant identification
with gr.Sidebar():
participant_id = gr.Textbox(label='What is your participant ID?', interactive = True)
lets_go = gr.Button("Let's go!")
with gr.Tab("Instructions", elem_id = 'instructions'):
instructions = gr.HTML(intro_html, padding = False)
agreement = gr.Checkbox(value = False, label = "I agree", info = "I agree to have my annotations, comments, and questionnaire answers used for research purposes. I understand that any personal information will be anonymized.", interactive = True)
with gr.Tab("Annotation Interface"):
ann_completed = gr.Number(0, visible=False)
total = gr.Number(total_annotations, visible=False)
# Row with progress bar
gr.HTML("""
<div id="myProgress">
<div id="myBar">
<span id="progressText">Press "Let's go!" to start</span>
</div>
</div>""", padding = False)
# Row with audio player
with gr.Row():
audio_player = gr.Audio(value= 'test.mp3', label="Audio", type="filepath", interactive=False, show_download_button = False, show_share_button = False)
# Hidden row with corresponding sentence
with gr.Row():
accordion = gr.Accordion(label="Click to see the sentence", open=False)
with accordion:
sentence_text = gr.Textbox(label="Transcription", interactive=False, value = 'This is a sentence.')
# Row for emotion annotation and confidence
with gr.Row():
emotions = gr.Radio(["Blank", "Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion", value = "Blank", visible = False)
with gr.Row():
confidence = gr.Slider(label="Confidence (%)", minimum=0, maximum=100, step=10, visible = False)
with gr.Row():
# Comment section
comments = gr.Textbox(label="Comments", visible =False)
# Next and Previous Buttons
with gr.Row():
previous_button = gr.Button("Previous Example", visible = False)
next_button = gr.Button("Next Example", visible = False)
# Go back
previous_button.click(
previous_example,
inputs=[emotions, confidence, comments, participant_id],
outputs=[sentence_text, audio_player, emotions, confidence, ann_completed, comments],
)
# Go to the next example
next_button.click(
next_example,
inputs=[emotions, confidence, comments, participant_id],
outputs=[sentence_text, audio_player, emotions, confidence, ann_completed, comments],
)
    # Update the progress bar (the js function reads ann_completed and total)
    next_button.click(None, [], [ann_completed, total], js=js_progress_bar)
    lets_go.click(None, [], [ann_completed, total], js=js_progress_bar)
    # Lock the participant ID, reveal the annotation widgets, and load the first example
    lets_go.click(deactivate_participant_id, [participant_id, lets_go], [participant_id, lets_go])
    lets_go.click(activate_elements, [emotions, confidence, comments, next_button, previous_button], [emotions, confidence, comments, next_button, previous_button])
    lets_go.click(load_example, inputs=[gr.Number(current_index["index"], visible=False)], outputs=[sentence_text, audio_player, emotions, confidence, ann_completed, comments])
demo.launch()