import os
from pathlib import Path

import gradio as gr
import pandas as pd
from huggingface_hub import login
from mutagen.mp3 import MP3
from mutagen.wave import WAVE
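# Custom CSS: progress bar, themed headings, and instruction content boxes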
css = """#myProgress {
width: 100%;
background-color: var(--block-border-color);
border-radius: 2px;
}
#myBar {
width: 0%;
height: 30px;
background-color: var(--block-title-background-fill);
border-radius: 2px;
}
#progressText {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
color: var(--block-title-text-color);
font-weight: normal;
font-size: 14px;
}
h1, h2, h3, h4 {
padding: var(--block-title-padding);
color: var(--block-title-text-color);
border: solid var(--block-title-border-width) var(--block-title-border-color);
border-radius: var(--block-title-radius);
background: var(--block-title-background-fill);
width: fit-content;
display: inline-block;
}
h4 {
margin: 0px;
color: var(--block-title-background-fill);
background: var(--block-title-text-color);
}
#instructions {
max-width: 980px;
align-self: center;
}
.content-box {
border-color: var(--block-border-color);
border-radius: var(--block-radius);
background: var(--block-background-fill);
padding: var(--block-label-padding);
}
"""
js_progress_bar = """
function move(start, end, total_duration, current_index, n_ann, total_ann) {
var elem = document.getElementById("myBar");
elem.style.width = n_ann/total_ann * 100 + "%";
progressText.innerText = `${current_index} / ${total_ann} (Completed: ${n_ann})`;
    // Locate the waveform wrapper rendered by wavesurfer inside gr.Audio's shadow DOM
    const waveform = document.querySelector('#audio_to_annotate #waveform div');
    const shadowRoot = waveform.shadowRoot;
    const canvases = shadowRoot.querySelector('.wrapper');
const leftOffsetPct = start / total_duration;
const widthPct = (end - start) / total_duration;
// Get CSS variable for background color
const blockColor = getComputedStyle(document.documentElement)
.getPropertyValue('--block-title-background-fill')
.trim() || 'red'; // Default to red if variable is not found
    // Create (or reuse) a style element in the shadow DOM so repeated calls
    // replace the segment overlay instead of stacking duplicate rules
    let style = shadowRoot.getElementById('segment-overlay-style');
    if (!style) {
        style = document.createElement('style');
        style.id = 'segment-overlay-style';
        shadowRoot.appendChild(style);
    }
    style.textContent = `
        .wrapper::after {
            content: '';
            position: absolute;
            top: 0;
            left: ${canvases.offsetWidth * leftOffsetPct}px;
            width: ${canvases.offsetWidth * widthPct}px;
            height: 100%;
            background-color: ${blockColor};
            z-index: 999;
            opacity: 0.5;
        }
        /* Ensure parent has positioning context */
        .wrapper {
            position: relative;
        }
    `;
}
"""
intro_html = """
<div class="content-box">
<p>Spoken language communicates more than just words. Speakers use tone, pitch, and other nonverbal cues to express emotions. In emotional speech, these cues can strengthen or even contradict the meaning of the words—for example, irony can make a positive phrase sound sarcastic. For this research, we will focus on three basic emotions plus neutral:</p>
<ul>
<li><h4>Anger</h4></li>
<li><h4>Happiness</h4></li>
<li><h4>Sadness</h4></li>
<li><h4>Neutral</h4></li>
</ul>
<p>This may seem like a small set, but it's a great starting point for analyzing emotions in such a large collection: <strong>303 hours of interviews! (That’s 13 days of nonstop listening! &#128558)</strong></p>
</div>
<h2>The ACT-UP Oral History Project</h2>
<div class="content-box">
<p>You will be annotating short audio clips extracted from the ACT UP (AIDS Coalition to Unleash Power) Oral History Project, developed by Sarah Schulman and Jim Hubbard. The archive comprises 187 interviews with individuals who were part of ACT UP in New York during the AIDS epidemic of the late 1980s and early 1990s. In each video, the subjects talk about their lives before the epidemic, how they were affected by AIDS, and their work in ACT UP.
</p><p>Schulman sought to document the group’s public activism and capture the atmosphere among its members at the height of the crisis: </p><h4>"I wanted to show how crazy and desperate everyone was at that point, organizing political funerals and riding around in vans with the bodies of their dead friends. I wanted to convey what the suffering was like at that point."</h4><p>
Schulman describes the archive as a space that embodies challenging emotions, such as the pervasive fear of death, grief, and what Jim Hubbard refers to as the activists' "righteous anger." </p>
</div>
<h2>What will you be annotating?</h2>
<div class="content-box">
<p>You will annotate one emotion per short audio clip, based on the following criteria:</p>
<ul>
<li>
<h4>Predominant Emotion:</h4> The emotion expressed with the highest intensity. Emotions can be complex, and multiple emotions may occur at the same time.
</li>
<li>
<h4>Perceived Emotion at the Time of Recording:</h4> In Oral History Archives, interviewees discuss their past. However, you should annotate the emotion they appear to feel at the time of recording, NOT what they felt during the event they describe.
</li>
<li>
<h4>Speech Emotionality:</h4> Focus on how something is said rather than what is said. For example, if a friend recounts an awful day with humor, the content may be sad, but the delivery is joyful. In this case, linguistic emotionality (content) would be classified as sad, while paralinguistic emotionality (tone and delivery) would be classified as joyful.
</li>
</ul>
<p>Furthermore, you will be asked to rate <strong>"How confident are you that the annotated emotion is present in the recording?"</strong> on a scale from 0 to 100, where 0 means "not at all confident" and 100 means "certain, completely confident". There will be a <strong>"Comment/Feedback"</strong> section where you can make notes. Below the audio, there will be an option to view the transcribed sentence. Please use this only if you are struggling to understand the audio.</p>
</div>
"""
examples_explanation = """<h3>Audio examples</h3>
<div class="content-box">
<p>Let's check out examples for the four emotions to annotate. Note that all these examples use the same sentence and are acted out, making the emotionality in speech more apparent. In a real-world setting, emotionality is more complex, so you will find a list of additional emotions within each of the three emotion categories (Happy, Sad, and Angry) to assist you during annotation.</p>
</div>"""
side_bar_html = """
<h3>The task</h3>
<div class="content-box">
<p>You will annotate one emotion per short audio clip, based on the following criteria:</p>
<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
<span>&#9989;</span>
<h4 style="margin: 0;">Predominant Emotion</h4>
</div>
<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
<span>&#9989;</span>
<h4 style="margin: 0;">Perceived Emotion at the Time of Recording</h4>
</div>
<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
<span>&#9989;</span>
<h4 style="margin: 0;">Speech Emotionality</h4>
</div>
</div>
<h3>Major subclasses</h3>
<div class="content-box">
<table border="1">
<thead>
<tr>
<th>Emotion Label</th>
<th>Major Subclasses</th>
</tr>
</thead>
<tbody>
<tr>
<td>Happiness</td>
<td>Affection, Goodwill, Joy, Satisfaction, Zest, Acceptance, Pride, Hope, Excitement, Relief, Passion, Caring</td>
</tr>
<tr>
<td>Sadness</td>
<td>Suffering, Regret, Displeasure, Embarrassment, Sympathy, Depression</td>
</tr>
<tr>
<td>Anger</td>
<td>Irritability, Torment, Jealousy, Disgust, Rage, Frustration</td>
</tr>
</tbody>
</table>
</div>
"""
persistent_storage = Path('/data')
# File list for the current participant and its length; populated by
# deactivate_participant_id() once a valid participant ID is submitted
file_list = None
total_annotations = 0
# Initialize an empty DataFrame to store annotations
annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'confidence', 'comments', 'n_clicks'])
password_files = os.getenv("password_files")
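# Each participant ID maps to the index of an annotation group CSV;
# IDs that share an index annotate the same set of files.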
possible_ids = {'Tiger-001': 0, 'Panda-002': 0,
'Falcon-003': 1, 'Wolf-004': 1,
'Dolphin-005': 2, 'Eagle-006': 2,
'Jaguar-007': 3, 'Rhino-008': 3,
'Zebra-009': 4, 'Lion-010': 4,
'Cheetah-011': 5, 'Bear-012': 5}
def get_audio_duration(file_path):
if file_path.lower().endswith('.mp3'):
audio = MP3(file_path)
elif file_path.lower().endswith(('.wav', '.wave')):
audio = WAVE(file_path)
else:
raise ValueError("Unsupported file format")
return audio.info.length # Duration in seconds
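# e.g. get_audio_duration('blank.mp3') returns the clip length in seconds as a float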
def get_storage(password):
    """Lists all annotation CSVs in persistent storage and the total disk usage.
    Source: https://discuss.huggingface.co/t/accessing-data-folder-of-persistent-storage/46681/2"""
    files = []
    usage = 0
    if password == password_files:
        files = [
            {
                "orig_name": file.name,
                "name": file.resolve(),
                "size": file.stat().st_size,
                "data": None,
                "is_file": True,
            }
            for file in persistent_storage.glob("**/*.csv")
            if file.is_file()
        ]
        usage = sum(f['size'] for f in files)
    else:
        gr.Warning("Please provide the correct password")
    return files, f"{usage / (1024.0 ** 3):.3f}GB"
def load_first_example(participant_id, ann_completed, current_index):
    """Loads the first example to annotate and updates the index.
    If the participant already has a saved annotation file, annotation resumes
    from the first example that has not been annotated yet."""
    global annotations
    path_ann = f'{persistent_storage}/{participant_id}_annotations.csv'
    if os.path.exists(path_ann):
        annotations = pd.read_csv(path_ann, keep_default_na=False)
        current_index = len(annotations)
        ann_completed = gr.Number(len(annotations) - 1, visible=False)
    return *load_example(current_index), ann_completed, current_index
def load_example(index):
"""Loads the example in row #index from dataframe file_list.
If there are any annotations it will give those values to the annotation dataframe"""
row = file_list.iloc[index]
audio_path = os.path.join(persistent_storage, 'files_to_annotate_2round', row["sample_id"].split('-')[0], row["sample_id"] + '.wav')
sentence = row["sentence"]
# If the user already made an annotation for this example, gradio will return said annotation
previous_annotation = (
annotations.iloc[index].to_dict() if index < len(annotations) else {"sample_id": row["sample_id"], "emotion": 'Blank', "confidence": 0,
"comments": '', "n_clicks": 0}
)
start = row['start']
end = row['end']
duration = get_audio_duration(audio_path)
print(f'{start} {end} {duration}')
return (sentence, audio_path, previous_annotation['emotion'], previous_annotation['confidence'], previous_annotation["comments"], n_clicks, start, end, duration)
def save_annotation(emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index):
"""Save the annotation for the current example."""
row = file_list.iloc[current_index]
sample_id = row["sample_id"]
sentence = row["sentence"]
# Update or append annotation
if sample_id in annotations["sample_id"].values:
annotations.loc[annotations["sample_id"] == sample_id, ["emotion", "confidence", "comments", "n_clicks"]] = \
[emotions, confidence, comments, n_clicks]
else:
annotations.loc[len(annotations)] = [sample_id, sentence, emotions, confidence, comments, n_clicks]
ann_completed = gr.Number(ann_completed + 1, visible=False)
annotations.to_csv(f"{persistent_storage}/{participant_id}_annotations.csv", index=False) # Save to a CSV file
return ann_completed
def next_example(emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index):
"""Move to the next example."""
if emotions == "Blank":
gr.Warning("Please fill out the emotion section. 'Blank' is not a valid emotion.")
else:
ann_completed = save_annotation(emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index)
if current_index < len(file_list) - 1:
current_index += 1
return *load_example(current_index), ann_completed, current_index
def previous_example(emotion, confidence, comments, n_clicks, participant_id, ann_completed, current_index):
"""Move to the previous example."""
if emotion != "Blank":
ann_completed = save_annotation(emotion, confidence, comments, n_clicks, participant_id, ann_completed, current_index)
if current_index > 0:
current_index -= 1
return *load_example(current_index), ann_completed, current_index
def deactivate_participant_id(participant_id, lets_go, total, previous_button, next_button, sentence_text, audio_player, emotions, confidence, comments, n_clicks, ann_completed, current_index):
    """Validates the participant ID, loads the matching annotation group, and
    reveals the annotation widgets."""
    global file_list
    global total_annotations
    # Defaults so every output is defined even when the ID is invalid
    start, end, duration = 0, 0, 0
    if participant_id in possible_ids:
file_list = pd.read_csv(os.path.join(persistent_storage, 'files_to_annotate_2round', f'group_{possible_ids[participant_id]}.csv'), keep_default_na=False)
total_annotations = len(file_list)
total = gr.Number(total_annotations, visible=False)
sentence, audio_player, emotions, confidence, comments, n_clicks, start, end, duration, ann_completed, current_index = load_first_example(participant_id, ann_completed, current_index)
participant_id = gr.Textbox(label='What is your participant ID?', value = participant_id, interactive = False)
lets_go = gr.Button("Participant selected!", interactive = False)
emotions = gr.Radio(["Blank", "Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion", value = emotions, visible = True)
confidence = gr.Slider(label="How confident are you that the annotated emotion is present in the recording? (%)", minimum=0, maximum=100, step=10, visible = True, value = confidence)
comments = gr.Textbox(label="Comments", visible =True, value = comments)
previous_button = gr.Button("Previous Example", visible = True)
next_button = gr.Button("Next Example",visible = True)
else:
gr.Warning("Please insert a valid participant ID")
return participant_id, lets_go, total, previous_button, next_button, sentence_text, audio_player, emotions, confidence, comments, n_clicks, start, end, duration, ann_completed, current_index
def count_clicks(n_clicks):
    """Increments the play counter each time the audio player is started."""
    n_clicks = gr.Number(n_clicks + 1, visible=False)
    return n_clicks
# ===================
# Gradio Interface
# ===================
with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
# Instructions for emotion annotation
with gr.Sidebar():
participant_id = gr.Textbox(label='What is your participant ID?', interactive = True)
lets_go = gr.Button("Let's go!")
cheat_sheet = gr.HTML(side_bar_html, padding = False)
#happy_words = gr.Textbox(label = "Happy")
with gr.Tab("Instructions", elem_id = 'instructions'):
instructions = gr.HTML(intro_html, padding = False)
with gr.Blocks():
description = gr.HTML(examples_explanation, padding = False)
with gr.Accordion(label = "Neutral", open= False):
neutral_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/neutral.wav', label = "Neutral")
with gr.Accordion(label = "Happy", open = False):
happy_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/happy_low.wav', label = "Happy (Low Intensity)")
happy_int_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/happy_intense.wav', label = "Happy (High Intensity)")
with gr.Accordion(label = "Sad", open = False):
sad_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/sad_low.wav', label = "Sad (Low Intensity)")
sad_int_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/sad_intense.wav', label = "Sad (High Intensity)")
with gr.Accordion(label = "Anger", open = False):
angry_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/angry_low.wav', label = "Anger (Low Intensity)")
angry_int_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/angry_intense.wav', label = "Anger (High Intensity)")
#agreement = gr.Checkbox(value = False, label = "I agree", info = "I agree to have my annotations, comments, and questionnaire answers used for research purposes. I understand that any personal information will be anonymized.", interactive = True)
with gr.Tab("Annotation Interface"):
ann_completed = gr.Number(0, visible=False)
total = gr.Number(0, visible=False)
current_index = gr.Number(0, visible = False)
start = gr.Number(0, visible = False)
end = gr.Number(0, visible = False)
duration = gr.Number(0, visible = False)
n_clicks = gr.Number(0, visible = False)
# Row with progress bar
gr.HTML("""
<div id="myProgress">
<div id="myBar">
<span id="progressText">Press "Let's go!" to start</span>
</div>
</div>
""", padding = False)
# Row with audio player
with gr.Row():
audio_player = gr.Audio(value= 'blank.mp3', label="Audio", type="filepath", interactive=False, show_download_button = False, show_share_button = False, elem_id = "audio_to_annotate")
# Hidden row with corresponding sentence
with gr.Row():
accordion = gr.Accordion(label="Click to see the sentence", open=False)
with accordion:
sentence_text = gr.Textbox(label="Transcription", interactive=False, value = 'This is a sentence.')
# Row for emotion annotation and confidence
with gr.Row():
emotions = gr.Radio(["Blank", "Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion", value = "Blank", visible = False)
with gr.Row():
confidence = gr.Slider(label="Confidence (%)", minimum=0, maximum=100, step=10, visible = False)
with gr.Row():
# Comment section
comments = gr.Textbox(label="Comments", visible =False)
# Next and Previous Buttons
with gr.Row():
previous_button = gr.Button("Previous Example", visible = False)
next_button = gr.Button("Next Example", visible = False)
# Go back
previous_button.click(
previous_example,
inputs=[emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index],
outputs=[sentence_text, audio_player, emotions, confidence, comments, n_clicks, start, end, duration, ann_completed, current_index],).then(None, [], [start, end, duration, current_index,ann_completed, total], js = js_progress_bar)
# Go to the next example
next_button.click(
next_example,
inputs=[emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index],
outputs=[sentence_text, audio_player, emotions, confidence, comments, n_clicks, start, end, duration, ann_completed, current_index],).then(None, [], [start, end, duration, current_index,ann_completed, total], js = js_progress_bar)
buttons = [previous_button, next_button]
data = [sentence_text, audio_player, emotions, confidence, comments]
lets_go.click(deactivate_participant_id, [participant_id, lets_go, total, *buttons, *data, n_clicks, ann_completed, current_index], [participant_id, lets_go, total, *buttons, *data, n_clicks, start, end, duration, ann_completed, current_index]).then( None, [], [start, end, duration, current_index, ann_completed, total], js = js_progress_bar)
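        # Track how many times the participant plays the clip before annotating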
audio_player.play(count_clicks, [n_clicks], [n_clicks])
with gr.Tab("Access Files"):
with gr.Row():
with gr.Column():
password = gr.Textbox(label='Password', interactive = True)
get_files_button = gr.Button("Get Files")
with gr.Column():
files = gr.Files(label="Files")
storage = gr.Text(label="Total Usage")
get_files_button.click(get_storage, inputs= [password], outputs=[files, storage], postprocess=False)
demo.launch(allowed_paths = ['/data/files_to_annotate_2round'])