Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -84,7 +84,7 @@ if uploaded_file_paths is not None:
|
|
| 84 |
while (len(st.session_state.summaries) < len(valid_files)):
|
| 85 |
st.session_state.summaries.append([])
|
| 86 |
st.info(f'{len(valid_files)} valid files: {[fi.name for fi in valid_files]}')
|
| 87 |
-
for j, tab in enumerate(audio_tabs):
|
| 88 |
if tab.button("Analyze Audio",key=f"button_{j}"):
|
| 89 |
if uploaded_file is None:
|
| 90 |
tab.error('Upload a file first!')
|
|
@@ -97,14 +97,15 @@ for j, tab in enumerate(audio_tabs):
|
|
| 97 |
|
| 98 |
# RTTM load as filler
|
| 99 |
speakerList, annotations = su.loadAudioRTTM(sample_data[j])
|
| 100 |
-
st.session_state.results[j] = (speakerList,annotations)
|
|
|
|
| 101 |
|
| 102 |
-
if len(st.session_state.results) >
|
| 103 |
with st.spinner(text='Loading results...'):
|
| 104 |
# Display breakdowns
|
| 105 |
#--------------------------------------------------------------------------
|
| 106 |
|
| 107 |
-
speakerList, annotations = st.session_state.results[j]
|
| 108 |
|
| 109 |
# Prepare data
|
| 110 |
sortedSpeakerList = sorted([[row for row in speaker if row[1] > 0.25] for speaker in speakerList if len([row for row in speaker if row[1] > 0.25]) > 0],
|
|
@@ -114,16 +115,16 @@ for j, tab in enumerate(audio_tabs):
|
|
| 114 |
lecturer_pred_count = 2
|
| 115 |
totalSeconds = 9049
|
| 116 |
lecturer_speaker_times = []
|
| 117 |
-
for i,speaker in enumerate(lecturer_speaker_list):
|
| 118 |
lecturer_speaker_times.append(0)
|
| 119 |
for timeSection in speaker:
|
| 120 |
-
lecturer_speaker_times[i] += timeSection[1]
|
| 121 |
|
| 122 |
all_speaker_times = []
|
| 123 |
-
for i,speaker in enumerate(sortedSpeakerList):
|
| 124 |
all_speaker_times.append(0)
|
| 125 |
for timeSection in speaker:
|
| 126 |
-
all_speaker_times[i] += timeSection[1]
|
| 127 |
|
| 128 |
# Lecturer vs. Audience
|
| 129 |
#---------------------------------------------------------------------------
|
|
@@ -254,7 +255,7 @@ for j, tab in enumerate(audio_tabs):
|
|
| 254 |
tab.write("Total length of audio: {}h:{:02d}m:{:02d}s".format(int(totalSeconds/3600),int((totalSeconds%3600)/60),int(totalSeconds%60)))
|
| 255 |
tab.table(df)
|
| 256 |
|
| 257 |
-
st.session_state.summaries[j] = [totalSeconds,lecturer_speaker_times,all_speaker_times]
|
| 258 |
|
| 259 |
with st.spinner(text='Processing summary results...'):
|
| 260 |
summary_count = 0
|
|
|
|
| 84 |
while (len(st.session_state.summaries) < len(valid_files)):
|
| 85 |
st.session_state.summaries.append([])
|
| 86 |
st.info(f'{len(valid_files)} valid files: {[fi.name for fi in valid_files]}')
|
| 87 |
+
for i, tab in enumerate(audio_tabs):
|
| 88 |
if tab.button("Analyze Audio",key=f"button_{j}"):
|
| 89 |
if uploaded_file is None:
|
| 90 |
tab.error('Upload a file first!')
|
|
|
|
| 97 |
|
| 98 |
# RTTM load as filler
|
| 99 |
speakerList, annotations = su.loadAudioRTTM(sample_data[j])
|
| 100 |
+
st.session_state.results[i] = (speakerList,annotations)
|
| 101 |
+
st.session_state.summaries[i] = []
|
| 102 |
|
| 103 |
+
if len(st.session_state.results) > i and len(st.session_state.summaries) > i and len(st.session_state.results[i]) > 0:
|
| 104 |
with st.spinner(text='Loading results...'):
|
| 105 |
# Display breakdowns
|
| 106 |
#--------------------------------------------------------------------------
|
| 107 |
|
| 108 |
+
speakerList, annotations = st.session_state.results[i]
|
| 109 |
|
| 110 |
# Prepare data
|
| 111 |
sortedSpeakerList = sorted([[row for row in speaker if row[1] > 0.25] for speaker in speakerList if len([row for row in speaker if row[1] > 0.25]) > 0],
|
|
|
|
| 115 |
lecturer_pred_count = 2
|
| 116 |
totalSeconds = 9049
|
| 117 |
lecturer_speaker_times = []
|
| 118 |
+
for j,speaker in enumerate(lecturer_speaker_list):
|
| 119 |
lecturer_speaker_times.append(0)
|
| 120 |
for timeSection in speaker:
|
| 121 |
+
lecturer_speaker_times[j] += timeSection[1]
|
| 122 |
|
| 123 |
all_speaker_times = []
|
| 124 |
+
for j,speaker in enumerate(sortedSpeakerList):
|
| 125 |
all_speaker_times.append(0)
|
| 126 |
for timeSection in speaker:
|
| 127 |
+
all_speaker_times[j] += timeSection[1]
|
| 128 |
|
| 129 |
# Lecturer vs. Audience
|
| 130 |
#---------------------------------------------------------------------------
|
|
|
|
| 255 |
tab.write("Total length of audio: {}h:{:02d}m:{:02d}s".format(int(totalSeconds/3600),int((totalSeconds%3600)/60),int(totalSeconds%60)))
|
| 256 |
tab.table(df)
|
| 257 |
|
| 258 |
+
st.session_state.summaries[i] = [totalSeconds,lecturer_speaker_times,all_speaker_times]
|
| 259 |
|
| 260 |
with st.spinner(text='Processing summary results...'):
|
| 261 |
summary_count = 0
|