fpessanha commited on
Commit
c71a422
·
1 Parent(s): 70c66ad

Feat: Add warning message when reaching the final example

Browse files
__pycache__/load_and_save.cpython-310.pyc CHANGED
Binary files a/__pycache__/load_and_save.cpython-310.pyc and b/__pycache__/load_and_save.cpython-310.pyc differ
 
load_and_save.py CHANGED
@@ -43,8 +43,8 @@ def load_first_example(annotations_df, file_list_df, id, completed, index):
43
 
44
  if os.path.exists(path_ann):
45
  annotations_df = pd.read_csv(path_ann, keep_default_na=False)
46
- index = len(annotations_df)
47
- completed = len(annotations_df) - 1 # update how many examples were completed
48
 
49
  else:
50
  # Initialize an empty DataFrame to store annotations
@@ -74,21 +74,39 @@ def load_example(annotations_df, file_list_df, index):
74
  * duration: current sentence duration
75
 
76
  """
77
-
78
- row = file_list_df.iloc[index]
79
- audio_path = os.path.join(persistent_storage, 'files_to_annotate_2round', row["sample_id"].split('-')[0], row["sample_id"] + '.wav')
80
- sentence = row["sentence"]
81
-
82
- # If the user already made an annotation for this example, gradio will return said annotation
83
- ann = (
84
- annotations_df.iloc[index].to_dict() if index < len(annotations_df) else {"sample_id": row["sample_id"], "emotion": 'Blank', "confidence": 'Blank',
85
- "comments": '', "n_clicks": 0}
86
- )
87
-
88
- start = row['start']
89
- end = row['end']
90
- duration = get_audio_duration(audio_path)
91
- print(f'start/end/duration (load example) - {start} {end} {duration}')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
  return sentence, audio_path, ann['emotion'], ann['confidence'], ann["comments"], ann['n_clicks'], start, end, duration
93
 
94
 
@@ -162,7 +180,10 @@ def next_example(annotations_df, file_list_df, emotions, confidence, comments, n
162
  annotations_df, ann_completed = save_annotation(annotations_df, file_list_df, emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index)
163
  if current_index < len(file_list_df) - 1:
164
  current_index += 1
165
-
 
 
 
166
  sentence, audio_path, emotions, confidence, comments, clicks, start, end, duration = load_example(annotations_df, file_list_df, current_index)
167
  return annotations_df, sentence, audio_path, emotions, confidence, comments, clicks, gr.State(start), gr.State(end), gr.State(duration), ann_completed, current_index
168
 
 
43
 
44
  if os.path.exists(path_ann):
45
  annotations_df = pd.read_csv(path_ann, keep_default_na=False)
46
+ index = min(len(file_list_df) - 1, len(annotations_df))
47
+ completed = len(annotations_df) # update how many examples were completed
48
 
49
  else:
50
  # Initialize an empty DataFrame to store annotations
 
74
  * duration: current sentence duration
75
 
76
  """
77
+ if index < len(file_list_df):
78
+ row = file_list_df.iloc[index]
79
+ audio_path = os.path.join(persistent_storage, 'files_to_annotate_2round', row["sample_id"].split('-')[0], row["sample_id"] + '.wav')
80
+ sentence = row["sentence"]
81
+
82
+ # If the user already made an annotation for this example, gradio will return said annotation
83
+ ann = (
84
+ annotations_df.iloc[index].to_dict() if index < len(annotations_df) else {"sample_id": row["sample_id"], "emotion": 'Blank', "confidence": 'Blank',
85
+ "comments": '', "n_clicks": 0}
86
+ )
87
+
88
+ start = row['start']
89
+ end = row['end']
90
+ duration = get_audio_duration(audio_path)
91
+ print(f'start/end/duration (load example) - {start} {end} {duration}')
92
+ else:
93
+ index -= 1
94
+ row = file_list_df.iloc[index]
95
+ audio_path = os.path.join(persistent_storage, 'files_to_annotate_2round', row["sample_id"].split('-')[0], row["sample_id"] + '.wav')
96
+ sentence = row["sentence"]
97
+
98
+ # If the user already made an annotation for this example, gradio will return said annotation
99
+ ann = (
100
+ annotations_df.iloc[index].to_dict() if index < len(annotations_df) else {"sample_id": row["sample_id"], "emotion": 'Blank', "confidence": 'Blank',
101
+ "comments": '', "n_clicks": 0}
102
+ )
103
+
104
+ start = row['start']
105
+ end = row['end']
106
+ duration = get_audio_duration(audio_path)
107
+ print(f'start/end/duration (load example) - {start} {end} {duration}')
108
+
109
+ gr.Warning("This is the last example, well done!")
110
  return sentence, audio_path, ann['emotion'], ann['confidence'], ann["comments"], ann['n_clicks'], start, end, duration
111
 
112
 
 
180
  annotations_df, ann_completed = save_annotation(annotations_df, file_list_df, emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index)
181
  if current_index < len(file_list_df) - 1:
182
  current_index += 1
183
+
184
+ else:
185
+ gr.Warning("This is the last example, well done!")
186
+ print(f'current_index {current_index}')
187
  sentence, audio_path, emotions, confidence, comments, clicks, start, end, duration = load_example(annotations_df, file_list_df, current_index)
188
  return annotations_df, sentence, audio_path, emotions, confidence, comments, clicks, gr.State(start), gr.State(end), gr.State(duration), ann_completed, current_index
189