fpessanha committed on
Commit
d8e60e3
·
1 Parent(s): a02f3a0

Feat: add progress bar

Browse files
Files changed (2) hide show
  1. annotations.csv +4 -1
  2. app.py +44 -17
annotations.csv CHANGED
@@ -1,2 +1,5 @@
1
  sample_id,sentence,emotion,comments
2
- 007-0023,I was mostly hanging out in East Village in the clubs.,Angry,
 
 
 
 
1
  sample_id,sentence,emotion,comments
2
+ 007-0023,I was mostly hanging out in East Village in the clubs.,Neutral,
3
+ 007-0034,"And we became friends, and then he told me that his lover had died of AIDS.",Angry,
4
+ 007-0086,"I mean, given the size of ACT UP, I mean, really, you're talking about a handful.",Neutral,
5
+ 007-0129,"And why is he like, you know, why are you putting him in yellow face?",Neutral,
app.py CHANGED
@@ -6,13 +6,18 @@ import gradio as gr
6
 
7
  # List of all audio files to annotate
8
  file_list = pd.read_excel(os.path.join('combined_annotations.xlsx'))
 
 
9
 
10
  # Initialize an empty DataFrame to store annotations
11
  annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'comments'])
12
  current_index = {"index": 0} # Dictionary to allow modifying inside functions
13
 
 
14
  def load_example(index):
15
- """Load the example (audio + text) by index."""
 
 
16
  row = file_list.iloc[index]
17
  audio_path = os.path.join('files_to_annotate_padded_smaller_emotion_set', row["SAMPLE ID"].split('-')[0], row["SAMPLE ID"] + '.wav')
18
  print(f"Audio path: {audio_path}, Exists: {os.path.exists(audio_path)}")
@@ -41,18 +46,19 @@ def save_annotation(emotions, comments):
41
  annotations.loc[len(annotations)] = [sample_id, sentence, emotions, comments]
42
 
43
  annotations.to_csv("annotations.csv", index=False) # Save to a CSV file
44
- #return f"Saved annotations for example {idx + 1}"
45
 
46
  def next_example(emotions, comments):
 
47
  if emotions == '':
48
  raise gr.Error("Please fill out the emotion section")
49
  else:
50
- """Move to the next example."""
51
  save_annotation(emotions, comments)
52
-
53
  if current_index["index"] < len(file_list) - 1:
54
  current_index["index"] += 1
55
  return load_example(current_index["index"])
 
56
  #return "End of examples", None, 0, 0, 0, 0, ''
57
 
58
 
@@ -64,12 +70,21 @@ def previous_example(emotion, comments):
64
  return load_example(current_index["index"])
65
  return load_example(current_index["index"])
66
 
67
- # Gradio Interface
 
 
 
 
 
 
 
68
 
 
69
  audio_path = 'test.mp3'
70
- with (gr.Blocks() as demo):
71
-
72
-
 
73
  # Row with audio player
74
  with gr.Row():
75
  audio_player = gr.Audio(value=audio_path, label="Audio", type="filepath", interactive=False)
@@ -87,29 +102,41 @@ with (gr.Blocks() as demo):
87
 
88
  # Instructions for emotion annotation
89
  with gr.Sidebar():
90
- gr.Textbox()
91
 
92
 
93
  # Next and Previous Buttons
94
  with gr.Row():
95
- next_button = gr.Button("Next Example")
96
  previous_button = gr.Button("Previous Example")
 
 
97
 
98
  comments = gr.Textbox(label="Comments", interactive=True)
99
- # Initial load
 
100
  sentence_text.value, audio_player.value, emotions.value, comments.value = load_example(
101
- current_index["index"]
102
- )
103
 
104
- next_button.click(
105
- next_example,
106
  inputs=[emotions, comments],
107
  outputs=[sentence_text, audio_player, emotions, comments],
108
  )
109
- previous_button.click(
110
- previous_example,
 
111
  inputs=[emotions, comments],
112
  outputs=[sentence_text, audio_player, emotions, comments],
113
  )
114
 
 
 
 
 
 
 
 
 
 
115
  demo.launch()
 
 
6
 
7
  # List of all audio files to annotate
8
  file_list = pd.read_excel(os.path.join('combined_annotations.xlsx'))
9
+ annotations_completed = 0
10
+ total_annotations = len(file_list)
11
 
12
  # Initialize an empty DataFrame to store annotations
13
  annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'comments'])
14
  current_index = {"index": 0} # Dictionary to allow modifying inside functions
15
 
16
+
17
  def load_example(index):
18
+ """Loads the example in row #index from dataframe file_list.
19
+ If there are any annotations it will give those values to the annotation dataframe"""
20
+
21
  row = file_list.iloc[index]
22
  audio_path = os.path.join('files_to_annotate_padded_smaller_emotion_set', row["SAMPLE ID"].split('-')[0], row["SAMPLE ID"] + '.wav')
23
  print(f"Audio path: {audio_path}, Exists: {os.path.exists(audio_path)}")
 
46
  annotations.loc[len(annotations)] = [sample_id, sentence, emotions, comments]
47
 
48
  annotations.to_csv("annotations.csv", index=False) # Save to a CSV file
49
+
50
 
51
def next_example(emotions, comments):
    """Save the current annotation and move to the next example.

    Parameters:
        emotions: emotion label chosen in the UI; must be non-empty.
        comments: free-text comments for the current sample.

    Returns:
        The (sentence, audio, emotion, comments) tuple produced by
        load_example for the new current index.

    Raises:
        gr.Error: when no emotion was selected, so the annotator is
            prompted instead of silently skipping the sample.
    """
    # Guard clause instead of if/else nesting.
    if emotions == '':
        raise gr.Error("Please fill out the emotion section")
    save_annotation(emotions, comments)
    # Advance only while more examples remain.  BUG FIX: the original
    # returned None once the last example was reached, which breaks the
    # four Gradio outputs wired to this callback; now the last example is
    # reloaded instead.
    if current_index["index"] < len(file_list) - 1:
        current_index["index"] += 1
    return load_example(current_index["index"])
63
 
64
 
 
70
  return load_example(current_index["index"])
71
  return load_example(current_index["index"])
72
 
73
def update_progress():
    """Advance the annotation counter and return a progress label.

    Uses the module-level counters annotations_completed and
    total_annotations.  The counter is capped at total_annotations so
    repeated clicks past the end do not overflow the display.

    Returns:
        str: a label of the form "Progress: <done>/<total>".
    """
    global annotations_completed
    if annotations_completed < total_annotations:
        annotations_completed += 1
    # BUG FIX: the original f-string referenced the undefined name
    # `annotations_tasks`, raising NameError on every click; the unused
    # local `progress` is also dropped.
    return f"Progress: {annotations_completed}/{total_annotations}"
81
 
82
# Gradio Interface
audio_path = 'test.mp3'
# NOTE(review): `with (expr as name):` requires Python >= 3.10; the plain
# form below is equivalent and portable.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Task Progress Tracker")
    # Derive the initial label from the real task count instead of the
    # hard-coded "0/10", which goes stale whenever the file list changes.
    progress_text = gr.Textbox(value=f"Progress: 0/{total_annotations}", interactive=False)
87
+
88
  # Row with audio player
89
  with gr.Row():
90
  audio_player = gr.Audio(value=audio_path, label="Audio", type="filepath", interactive=False)
 
102
 
103
  # Instructions for emotion annotation
104
  with gr.Sidebar():
105
+ happy_words = gr.Textbox(label = "Happy")
106
 
107
 
108
  # Next and Previous Buttons
109
  with gr.Row():
 
110
  previous_button = gr.Button("Previous Example")
111
+ next_button = gr.Button("Next Example")
112
+
113
 
114
  comments = gr.Textbox(label="Comments", interactive=True)
115
+
116
+
117
  sentence_text.value, audio_player.value, emotions.value, comments.value = load_example(
118
+ current_index["index"])
 
119
 
120
+ previous_button.click(
121
+ previous_example,
122
  inputs=[emotions, comments],
123
  outputs=[sentence_text, audio_player, emotions, comments],
124
  )
125
+
126
+ next_button.click(
127
+ next_example,
128
  inputs=[emotions, comments],
129
  outputs=[sentence_text, audio_player, emotions, comments],
130
  )
131
 
132
+ next_button.click(update_progress, outputs=[progress_text])
133
+
134
+
135
+
136
+
137
+
138
+
139
+
140
+
141
  demo.launch()
142
+