Spaces:
Sleeping
Sleeping
Merge branch #Sonogram/Single-Audio-Demo' into 'Sonogram/Instructor-Support-Tool'
Browse files- README.md +1 -1
- app.py +512 -188
- requirements.txt +3 -2
- sonogram_utility.py +80 -0
README.md
CHANGED
|
@@ -4,7 +4,7 @@ emoji: 🌍
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: indigo
|
| 6 |
sdk: streamlit
|
| 7 |
-
sdk_version: 1.
|
| 8 |
app_file: app.py
|
| 9 |
pinned: true
|
| 10 |
license: mit
|
|
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: indigo
|
| 6 |
sdk: streamlit
|
| 7 |
+
sdk_version: 1.45.1
|
| 8 |
app_file: app.py
|
| 9 |
pinned: true
|
| 10 |
license: mit
|
app.py
CHANGED
|
@@ -26,6 +26,24 @@ import datetime as dt
|
|
| 26 |
enableDenoise = False
|
| 27 |
earlyCleanup = True
|
| 28 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
def save_data(
|
| 30 |
config_dict: Dict[str,str], audio_paths: List[str], userid: str,
|
| 31 |
) -> None:
|
|
@@ -138,18 +156,21 @@ def analyze(inFileName):
|
|
| 138 |
currFileIndex = file_names.index(inFileName)
|
| 139 |
print(f"Found at index {currFileIndex}")
|
| 140 |
if len(st.session_state.results) > currFileIndex and len(st.session_state.summaries) > currFileIndex and len(st.session_state.results[currFileIndex]) > 0:
|
|
|
|
|
|
|
| 141 |
# Handle
|
| 142 |
currAnnotation, currTotalTime = st.session_state.results[currFileIndex]
|
| 143 |
speakerNames = currAnnotation.labels()
|
| 144 |
-
|
| 145 |
# Update other categories
|
| 146 |
unusedSpeakers = st.session_state.unusedSpeakers[currFileIndex]
|
| 147 |
categorySelections = st.session_state["categorySelect"][currFileIndex]
|
| 148 |
-
|
| 149 |
noVoice, oneVoice, multiVoice = su.calcSpeakingTypes(currAnnotation,currTotalTime)
|
| 150 |
sumNoVoice = su.sumTimes(noVoice)
|
| 151 |
sumOneVoice = su.sumTimes(oneVoice)
|
| 152 |
sumMultiVoice = su.sumTimes(multiVoice)
|
|
|
|
| 153 |
|
| 154 |
df3 = pd.DataFrame(
|
| 155 |
{
|
|
@@ -161,6 +182,7 @@ def analyze(inFileName):
|
|
| 161 |
)
|
| 162 |
df3.name = "df3"
|
| 163 |
st.session_state.summaries[currFileIndex]["df3"] = df3
|
|
|
|
| 164 |
|
| 165 |
df4_dict = {}
|
| 166 |
nameList = st.session_state.categories
|
|
@@ -181,13 +203,17 @@ def analyze(inFileName):
|
|
| 181 |
else:
|
| 182 |
extraNames.append(sp)
|
| 183 |
extraValues.append(su.sumTimes(currAnnotation.subset([sp])))
|
|
|
|
|
|
|
| 184 |
df4_dict = {
|
| 185 |
-
"values": valueList+extraValues,
|
| 186 |
-
"names": nameList+extraNames,
|
| 187 |
}
|
| 188 |
df4 = pd.DataFrame(data=df4_dict)
|
| 189 |
df4.name = "df4"
|
| 190 |
st.session_state.summaries[currFileIndex]["df4"] = df4
|
|
|
|
|
|
|
| 191 |
|
| 192 |
speakerList,timeList = su.sumTimesPerSpeaker(oneVoice)
|
| 193 |
multiSpeakerList, multiTimeList = su.sumMultiTimesPerSpeaker(multiVoice)
|
|
@@ -228,6 +254,7 @@ def analyze(inFileName):
|
|
| 228 |
)
|
| 229 |
df5.name = "df5"
|
| 230 |
st.session_state.summaries[currFileIndex]["df5"] = df5
|
|
|
|
| 231 |
|
| 232 |
speakers_dataFrame,speakers_times = su.annotationToDataFrame(currAnnotation)
|
| 233 |
st.session_state.summaries[currFileIndex]["speakers_dataFrame"] = speakers_dataFrame
|
|
@@ -239,6 +266,7 @@ def analyze(inFileName):
|
|
| 239 |
}
|
| 240 |
df2 = pd.DataFrame(df2_dict)
|
| 241 |
st.session_state.summaries[currFileIndex]["df2"] = df2
|
|
|
|
| 242 |
except ValueError as e:
|
| 243 |
print(f"Value Error: {e}")
|
| 244 |
pass
|
|
@@ -290,7 +318,7 @@ if 'results' not in st.session_state:
|
|
| 290 |
if 'summaries' not in st.session_state:
|
| 291 |
st.session_state.summaries = []
|
| 292 |
if 'categories' not in st.session_state:
|
| 293 |
-
st.session_state.categories = [
|
| 294 |
st.session_state.categorySelect = []
|
| 295 |
# Single Use
|
| 296 |
if 'removeCategory' not in st.session_state:
|
|
@@ -302,6 +330,8 @@ if 'unusedSpeakers' not in st.session_state:
|
|
| 302 |
st.session_state.unusedSpeakers = []
|
| 303 |
if 'file_names' not in st.session_state:
|
| 304 |
st.session_state.file_names = []
|
|
|
|
|
|
|
| 305 |
|
| 306 |
|
| 307 |
|
|
@@ -314,7 +344,8 @@ if not isGPU:
|
|
| 314 |
|
| 315 |
uploaded_file_paths = st.file_uploader("Upload an audio of classroom activity to analyze", accept_multiple_files=True)
|
| 316 |
|
| 317 |
-
supported_file_types = ('.wav','.mp3','.mp4','.txt')
|
|
|
|
| 318 |
|
| 319 |
valid_files = []
|
| 320 |
file_paths = []
|
|
@@ -328,7 +359,7 @@ if uploaded_file_paths is not None:
|
|
| 328 |
file_names = []
|
| 329 |
# Reset valid_files?
|
| 330 |
for uploaded_file in uploaded_file_paths:
|
| 331 |
-
if not uploaded_file.name.endswith(supported_file_types):
|
| 332 |
st.error('File must be of type: {}'.format(supported_file_types))
|
| 333 |
uploaded_file = None
|
| 334 |
else:
|
|
@@ -353,12 +384,7 @@ if uploaded_file_paths is not None:
|
|
| 353 |
st.session_state.categorySelect.append(tempCategories)
|
| 354 |
while (len(st.session_state.summaries) < len(valid_files)):
|
| 355 |
st.session_state.summaries.append([])
|
| 356 |
-
|
| 357 |
-
# Clear replaced files
|
| 358 |
-
for i in range(len(valid_files)):
|
| 359 |
-
if len(st.session_state.results[i]) > 0 and st.session_state.results[i][0] != file_names[i]:
|
| 360 |
-
st.session_state.results[i] = []
|
| 361 |
-
st.session_state.summaries[i] = []'''
|
| 362 |
st.session_state.file_names = file_names
|
| 363 |
|
| 364 |
file_names = st.session_state.file_names
|
|
@@ -377,11 +403,39 @@ else:
|
|
| 377 |
if len(st.session_state.results) > i and len(st.session_state.results[i]) > 0:
|
| 378 |
continue
|
| 379 |
# Text files use sample data
|
| 380 |
-
if file_paths[i].endswith('.txt'):
|
| 381 |
with st.spinner(text=f'Loading Demo File {i+1} of {totalFiles}'):
|
| 382 |
-
time.sleep(1)
|
| 383 |
# RTTM load as filler
|
| 384 |
-
speakerList, annotations = su.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 385 |
# Approximate total seconds
|
| 386 |
totalSeconds = 0
|
| 387 |
for segment in annotations.itersegments():
|
|
@@ -392,7 +446,6 @@ else:
|
|
| 392 |
speakerNames = annotations.labels()
|
| 393 |
st.session_state.unusedSpeakers[i] = speakerNames
|
| 394 |
else:
|
| 395 |
-
#st.info(file_paths[i])
|
| 396 |
with st.spinner(text=f'Processing File {i+1} of {totalFiles}'):
|
| 397 |
annotations, totalSeconds = processFile(file_paths[i])
|
| 398 |
print(f"Finished processing {file_paths[i]}")
|
|
@@ -406,18 +459,69 @@ else:
|
|
| 406 |
with st.spinner(text=f'Analyzing File {i+1} of {totalFiles}'):
|
| 407 |
analyze(file_names[i])
|
| 408 |
print(f"Finished analyzing {file_paths[i]}")
|
|
|
|
| 409 |
st.success(f"Took {time.time() - start_time} seconds to analyze {totalFiles} files!")
|
| 410 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 411 |
currFile = st.sidebar.selectbox('Current File', file_names,on_change=updateMultiSelect,key="select_currFile")
|
|
|
|
| 412 |
if currFile is None and len(st.session_state.results) > 0 and len(st.session_state.results[0]) > 0:
|
| 413 |
st.write("Select a file to view from the sidebar")
|
| 414 |
try:
|
| 415 |
st.session_state.resetResult = False
|
| 416 |
currFileIndex = file_names.index(currFile)
|
|
|
|
| 417 |
if len(st.session_state.results) > currFileIndex and len(st.session_state.summaries) > currFileIndex and len(st.session_state.results[currFileIndex]) > 0:
|
|
|
|
|
|
|
|
|
|
| 418 |
# Handle
|
| 419 |
currAnnotation, currTotalTime = st.session_state.results[currFileIndex]
|
| 420 |
speakerNames = currAnnotation.labels()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 421 |
# Update other categories
|
| 422 |
unusedSpeakers = st.session_state.unusedSpeakers[currFileIndex]
|
| 423 |
categorySelections = st.session_state["categorySelect"][currFileIndex]
|
|
@@ -435,6 +539,11 @@ try:
|
|
| 435 |
|
| 436 |
newCategory = st.sidebar.text_input('Add category', key='categoryInput',on_change=addCategory)
|
| 437 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 438 |
df4_dict = {}
|
| 439 |
nameList = st.session_state.categories
|
| 440 |
extraNames = []
|
|
@@ -456,176 +565,380 @@ try:
|
|
| 456 |
df4 = pd.DataFrame(data=df4_dict)
|
| 457 |
df4.name = "df4"
|
| 458 |
st.session_state.summaries[currFileIndex]["df4"] = df4
|
| 459 |
-
|
| 460 |
-
|
| 461 |
-
|
| 462 |
-
|
| 463 |
-
|
| 464 |
-
|
| 465 |
-
|
| 466 |
-
|
| 467 |
-
|
| 468 |
-
|
| 469 |
-
|
| 470 |
-
|
| 471 |
-
|
| 472 |
-
|
| 473 |
-
|
| 474 |
-
|
| 475 |
-
|
| 476 |
-
|
| 477 |
-
|
| 478 |
-
|
| 479 |
-
|
| 480 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 481 |
names = "labels",
|
| 482 |
-
ids = "ids",
|
| 483 |
parents = "parents",
|
|
|
|
| 484 |
values = "percentiles",
|
| 485 |
custom_data=['labels','valueStrings','percentiles','parentNames','parentPercentiles'],
|
| 486 |
-
color
|
| 487 |
-
title="
|
| 488 |
-
|
| 489 |
-
|
| 490 |
-
|
| 491 |
-
|
| 492 |
-
|
| 493 |
-
|
| 494 |
-
|
| 495 |
-
|
| 496 |
-
|
| 497 |
-
|
| 498 |
-
|
| 499 |
-
|
| 500 |
-
|
| 501 |
-
|
| 502 |
-
|
| 503 |
-
|
| 504 |
-
|
| 505 |
-
|
| 506 |
-
|
| 507 |
-
|
| 508 |
-
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
|
| 512 |
-
|
| 513 |
-
|
| 514 |
-
|
| 515 |
-
|
| 516 |
-
|
| 517 |
-
|
| 518 |
-
|
| 519 |
-
|
| 520 |
-
|
| 521 |
-
|
| 522 |
-
|
| 523 |
-
|
| 524 |
-
|
| 525 |
-
|
| 526 |
-
|
| 527 |
-
|
| 528 |
-
|
| 529 |
-
|
| 530 |
-
|
| 531 |
-
|
| 532 |
-
|
| 533 |
-
|
| 534 |
-
|
| 535 |
-
|
| 536 |
-
],
|
| 537 |
-
xaxis=dict(
|
| 538 |
-
range=[dt.datetime.combine(dt.date.today(), dt.time.min),dt.datetime.combine(dt.date.today(), timeMax)]
|
| 539 |
-
),
|
| 540 |
-
xaxis_title="Time",
|
| 541 |
-
yaxis_title="Speaker",
|
| 542 |
-
legend_title=None
|
| 543 |
-
)
|
| 544 |
-
|
| 545 |
-
st.plotly_chart(fig_la, use_container_width=True)
|
| 546 |
-
|
| 547 |
-
fig2_la = px.bar(df2, x="values", y="names", color="names", orientation='h',
|
| 548 |
-
custom_data=["names","values"],title="Time Spoken by each Speaker")
|
| 549 |
-
fig2_la.update_xaxes(ticksuffix="%")
|
| 550 |
-
fig2_la.update_yaxes(autorange="reversed")
|
| 551 |
-
fig2_la.update_layout(
|
| 552 |
-
xaxis_title="Percentage Time Spoken",
|
| 553 |
-
yaxis_title="Speaker",
|
| 554 |
-
legend_title=None
|
| 555 |
|
| 556 |
-
|
| 557 |
-
|
| 558 |
-
|
| 559 |
-
|
| 560 |
-
|
| 561 |
-
|
| 562 |
-
|
| 563 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 564 |
|
| 565 |
except ValueError:
|
| 566 |
pass
|
| 567 |
|
| 568 |
if len(st.session_state.results) > 0:
|
| 569 |
-
with st.
|
| 570 |
-
|
| 571 |
-
|
| 572 |
-
|
| 573 |
-
|
| 574 |
-
|
| 575 |
-
|
| 576 |
-
|
| 577 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 578 |
|
| 579 |
-
|
| 580 |
-
|
| 581 |
-
|
| 582 |
-
|
| 583 |
-
|
| 584 |
-
|
| 585 |
-
|
| 586 |
-
|
| 587 |
-
|
| 588 |
-
|
| 589 |
-
|
| 590 |
-
|
| 591 |
-
|
| 592 |
-
|
| 593 |
-
|
| 594 |
-
|
| 595 |
-
|
| 596 |
-
|
| 597 |
-
|
| 598 |
-
|
| 599 |
-
|
| 600 |
-
|
| 601 |
-
|
| 602 |
-
|
| 603 |
-
|
| 604 |
-
|
| 605 |
-
|
| 606 |
-
|
| 607 |
-
|
| 608 |
-
|
| 609 |
-
|
| 610 |
-
|
| 611 |
-
|
| 612 |
-
|
| 613 |
-
|
| 614 |
-
df7_dict[category] = []
|
| 615 |
-
for resultID,summary in enumerate(st.session_state.summaries):
|
| 616 |
-
partialDf = summary["df5"]
|
| 617 |
-
for i in range(len(voiceNames)):
|
| 618 |
-
df7_dict[voiceNames[i]].append(partialDf["percentiles"][i])
|
| 619 |
-
df7 = pd.DataFrame(df7_dict)
|
| 620 |
-
sorted_df7 = df7.sort_values(by=['One Voice', 'Multi Voice'])
|
| 621 |
-
summFig2 = px.bar(sorted_df7, x="files", y=["One Voice","Multi Voice","No Voice",],title="Cross-file Voice Categories sorted for One Voice")
|
| 622 |
-
st.plotly_chart(summFig2, use_container_width=True)
|
| 623 |
-
sorted_df7_3 = df7.sort_values(by=['Multi Voice','One Voice'])
|
| 624 |
-
summFig3 = px.bar(sorted_df7_3, x="files", y=["One Voice","Multi Voice","No Voice",],title="Cross-file Voice Categories sorted for Multi Voice")
|
| 625 |
-
st.plotly_chart(summFig3, use_container_width=True)
|
| 626 |
-
sorted_df7_4 = df7.sort_values(by=['No Voice', 'Multi Voice'],ascending=False)
|
| 627 |
-
summFig4 = px.bar(sorted_df7_4, x="files", y=["One Voice","Multi Voice","No Voice",],title="Cross-file Voice Categories sorted for Any Voice")
|
| 628 |
-
st.plotly_chart(summFig4, use_container_width=True)
|
| 629 |
|
| 630 |
|
| 631 |
|
|
@@ -643,14 +956,25 @@ if st.button("Upload Files to Dataset"):
|
|
| 643 |
def convert_df(df):
|
| 644 |
return df.to_csv(index=False).encode('utf-8')
|
| 645 |
|
| 646 |
-
|
| 647 |
-
|
| 648 |
-
|
| 649 |
-
|
| 650 |
-
st.
|
| 651 |
-
|
| 652 |
-
|
| 653 |
-
|
| 654 |
-
|
| 655 |
-
|
| 656 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
enableDenoise = False
|
| 27 |
earlyCleanup = True
|
| 28 |
|
| 29 |
+
# [None,Low,Medium,High,Debug]
|
| 30 |
+
# [0,1,2,3,4]
|
| 31 |
+
verbosity=4
|
| 32 |
+
|
| 33 |
+
config = {
|
| 34 |
+
'displayModeBar': True,
|
| 35 |
+
'modeBarButtonsToRemove':[],
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
def printV(message,verbosityLevel):
|
| 39 |
+
global verbosity
|
| 40 |
+
if verbosity>=verbosityLevel:
|
| 41 |
+
print(message)
|
| 42 |
+
|
| 43 |
+
@st.cache_data
|
| 44 |
+
def convert_df(df):
|
| 45 |
+
return df.to_csv(index=False).encode('utf-8')
|
| 46 |
+
|
| 47 |
def save_data(
|
| 48 |
config_dict: Dict[str,str], audio_paths: List[str], userid: str,
|
| 49 |
) -> None:
|
|
|
|
| 156 |
currFileIndex = file_names.index(inFileName)
|
| 157 |
print(f"Found at index {currFileIndex}")
|
| 158 |
if len(st.session_state.results) > currFileIndex and len(st.session_state.summaries) > currFileIndex and len(st.session_state.results[currFileIndex]) > 0:
|
| 159 |
+
|
| 160 |
+
printV(f'In if',4)
|
| 161 |
# Handle
|
| 162 |
currAnnotation, currTotalTime = st.session_state.results[currFileIndex]
|
| 163 |
speakerNames = currAnnotation.labels()
|
| 164 |
+
printV(f'Loaded results',4)
|
| 165 |
# Update other categories
|
| 166 |
unusedSpeakers = st.session_state.unusedSpeakers[currFileIndex]
|
| 167 |
categorySelections = st.session_state["categorySelect"][currFileIndex]
|
| 168 |
+
printV(f'Loaded speaker selections',4)
|
| 169 |
noVoice, oneVoice, multiVoice = su.calcSpeakingTypes(currAnnotation,currTotalTime)
|
| 170 |
sumNoVoice = su.sumTimes(noVoice)
|
| 171 |
sumOneVoice = su.sumTimes(oneVoice)
|
| 172 |
sumMultiVoice = su.sumTimes(multiVoice)
|
| 173 |
+
printV(f'Calculated speaking types',4)
|
| 174 |
|
| 175 |
df3 = pd.DataFrame(
|
| 176 |
{
|
|
|
|
| 182 |
)
|
| 183 |
df3.name = "df3"
|
| 184 |
st.session_state.summaries[currFileIndex]["df3"] = df3
|
| 185 |
+
printV(f'Set df3',4)
|
| 186 |
|
| 187 |
df4_dict = {}
|
| 188 |
nameList = st.session_state.categories
|
|
|
|
| 203 |
else:
|
| 204 |
extraNames.append(sp)
|
| 205 |
extraValues.append(su.sumTimes(currAnnotation.subset([sp])))
|
| 206 |
+
extraPairsSorted = sorted(zip(extraNames, extraValues), key=lambda pair: pair[0])
|
| 207 |
+
extraNames, extraValues = zip(*extraPairsSorted)
|
| 208 |
df4_dict = {
|
| 209 |
+
"values": valueList+list(extraValues),
|
| 210 |
+
"names": nameList+list(extraNames),
|
| 211 |
}
|
| 212 |
df4 = pd.DataFrame(data=df4_dict)
|
| 213 |
df4.name = "df4"
|
| 214 |
st.session_state.summaries[currFileIndex]["df4"] = df4
|
| 215 |
+
|
| 216 |
+
printV(f'Set df4',4)
|
| 217 |
|
| 218 |
speakerList,timeList = su.sumTimesPerSpeaker(oneVoice)
|
| 219 |
multiSpeakerList, multiTimeList = su.sumMultiTimesPerSpeaker(multiVoice)
|
|
|
|
| 254 |
)
|
| 255 |
df5.name = "df5"
|
| 256 |
st.session_state.summaries[currFileIndex]["df5"] = df5
|
| 257 |
+
printV(f'Set df5',4)
|
| 258 |
|
| 259 |
speakers_dataFrame,speakers_times = su.annotationToDataFrame(currAnnotation)
|
| 260 |
st.session_state.summaries[currFileIndex]["speakers_dataFrame"] = speakers_dataFrame
|
|
|
|
| 266 |
}
|
| 267 |
df2 = pd.DataFrame(df2_dict)
|
| 268 |
st.session_state.summaries[currFileIndex]["df2"] = df2
|
| 269 |
+
printV(f'Set df2',4)
|
| 270 |
except ValueError as e:
|
| 271 |
print(f"Value Error: {e}")
|
| 272 |
pass
|
|
|
|
| 318 |
if 'summaries' not in st.session_state:
|
| 319 |
st.session_state.summaries = []
|
| 320 |
if 'categories' not in st.session_state:
|
| 321 |
+
st.session_state.categories = []
|
| 322 |
st.session_state.categorySelect = []
|
| 323 |
# Single Use
|
| 324 |
if 'removeCategory' not in st.session_state:
|
|
|
|
| 330 |
st.session_state.unusedSpeakers = []
|
| 331 |
if 'file_names' not in st.session_state:
|
| 332 |
st.session_state.file_names = []
|
| 333 |
+
if 'showSummary' not in st.session_state:
|
| 334 |
+
st.session_state.showSummary = 'No'
|
| 335 |
|
| 336 |
|
| 337 |
|
|
|
|
| 344 |
|
| 345 |
uploaded_file_paths = st.file_uploader("Upload an audio of classroom activity to analyze", accept_multiple_files=True)
|
| 346 |
|
| 347 |
+
supported_file_types = ('.wav','.mp3','.mp4','.txt','.rttm','.csv')
|
| 348 |
+
viewChoices = ["Voice Categories","Custom Categories","Detailed Voice Categories","Voice Category Treemap","Speaker Timeline","Time per Speaker"]
|
| 349 |
|
| 350 |
valid_files = []
|
| 351 |
file_paths = []
|
|
|
|
| 359 |
file_names = []
|
| 360 |
# Reset valid_files?
|
| 361 |
for uploaded_file in uploaded_file_paths:
|
| 362 |
+
if not uploaded_file.name.lower().endswith(supported_file_types):
|
| 363 |
st.error('File must be of type: {}'.format(supported_file_types))
|
| 364 |
uploaded_file = None
|
| 365 |
else:
|
|
|
|
| 384 |
st.session_state.categorySelect.append(tempCategories)
|
| 385 |
while (len(st.session_state.summaries) < len(valid_files)):
|
| 386 |
st.session_state.summaries.append([])
|
| 387 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 388 |
st.session_state.file_names = file_names
|
| 389 |
|
| 390 |
file_names = st.session_state.file_names
|
|
|
|
| 403 |
if len(st.session_state.results) > i and len(st.session_state.results[i]) > 0:
|
| 404 |
continue
|
| 405 |
# Text files use sample data
|
| 406 |
+
if file_paths[i].lower().endswith('.txt'):
|
| 407 |
with st.spinner(text=f'Loading Demo File {i+1} of {totalFiles}'):
|
|
|
|
| 408 |
# RTTM load as filler
|
| 409 |
+
speakerList, annotations = su.loadAudioTXT(file_paths[i])
|
| 410 |
+
printV(annotations,4)
|
| 411 |
+
# Approximate total seconds
|
| 412 |
+
totalSeconds = 0
|
| 413 |
+
for segment in annotations.itersegments():
|
| 414 |
+
if segment.end > totalSeconds:
|
| 415 |
+
totalSeconds = segment.end
|
| 416 |
+
st.session_state.results[i] = (annotations, totalSeconds)
|
| 417 |
+
st.session_state.summaries[i] = {}
|
| 418 |
+
speakerNames = annotations.labels()
|
| 419 |
+
st.session_state.unusedSpeakers[i] = speakerNames
|
| 420 |
+
elif file_paths[i].lower().endswith('.rttm'):
|
| 421 |
+
with st.spinner(text=f'Loading File {i+1} of {totalFiles}'):
|
| 422 |
+
# RTTM load as filler
|
| 423 |
+
speakerList, annotations = su.loadAudioRTTM(file_paths[i])
|
| 424 |
+
printV(annotations,4)
|
| 425 |
+
# Approximate total seconds
|
| 426 |
+
totalSeconds = 0
|
| 427 |
+
for segment in annotations.itersegments():
|
| 428 |
+
if segment.end > totalSeconds:
|
| 429 |
+
totalSeconds = segment.end
|
| 430 |
+
st.session_state.results[i] = (annotations, totalSeconds)
|
| 431 |
+
st.session_state.summaries[i] = {}
|
| 432 |
+
speakerNames = annotations.labels()
|
| 433 |
+
st.session_state.unusedSpeakers[i] = speakerNames
|
| 434 |
+
elif file_paths[i].lower().endswith('.csv'):
|
| 435 |
+
with st.spinner(text=f'Loading File {i+1} of {totalFiles}'):
|
| 436 |
+
# RTTM load as filler
|
| 437 |
+
speakerList, annotations = su.loadAudioCSV(file_paths[i])
|
| 438 |
+
printV(annotations,4)
|
| 439 |
# Approximate total seconds
|
| 440 |
totalSeconds = 0
|
| 441 |
for segment in annotations.itersegments():
|
|
|
|
| 446 |
speakerNames = annotations.labels()
|
| 447 |
st.session_state.unusedSpeakers[i] = speakerNames
|
| 448 |
else:
|
|
|
|
| 449 |
with st.spinner(text=f'Processing File {i+1} of {totalFiles}'):
|
| 450 |
annotations, totalSeconds = processFile(file_paths[i])
|
| 451 |
print(f"Finished processing {file_paths[i]}")
|
|
|
|
| 459 |
with st.spinner(text=f'Analyzing File {i+1} of {totalFiles}'):
|
| 460 |
analyze(file_names[i])
|
| 461 |
print(f"Finished analyzing {file_paths[i]}")
|
| 462 |
+
print(f"Took {time.time() - start_time} seconds to analyze {totalFiles} files!")
|
| 463 |
st.success(f"Took {time.time() - start_time} seconds to analyze {totalFiles} files!")
|
| 464 |
+
|
| 465 |
+
t = '''st.write(st.__version__)
|
| 466 |
+
c1,c2,c3 = st.columns(3)
|
| 467 |
+
with c1:
|
| 468 |
+
st.write("Left")
|
| 469 |
+
with c2:
|
| 470 |
+
st.write("Middle")
|
| 471 |
+
with c3:
|
| 472 |
+
st.write("Right")
|
| 473 |
+
t1, t2 = st.tabs(["Left","Right"])
|
| 474 |
+
with t1:
|
| 475 |
+
c1t1, c2t1 = st.columns(2)
|
| 476 |
+
with c1t1:
|
| 477 |
+
st.write("Left Left")
|
| 478 |
+
st.button("Hi")
|
| 479 |
+
f = open("test.txt",'w+')
|
| 480 |
+
f.write("Hello there")
|
| 481 |
+
f.close()
|
| 482 |
+
with open("test.txt") as f:
|
| 483 |
+
st.download_button(
|
| 484 |
+
"Save As PDF",
|
| 485 |
+
f,
|
| 486 |
+
'sonogram-test.txt',
|
| 487 |
+
key='download-txt',
|
| 488 |
+
on_click="ignore"
|
| 489 |
+
)
|
| 490 |
+
with c2t1:
|
| 491 |
+
st.write("Left Right")
|
| 492 |
+
st.button("There")
|
| 493 |
+
f = open("test2.txt",'w+')
|
| 494 |
+
f.write("Hello there")
|
| 495 |
+
f.close()
|
| 496 |
+
with open("test2.txt") as f:
|
| 497 |
+
st.download_button(
|
| 498 |
+
"Save As PDF",
|
| 499 |
+
f,
|
| 500 |
+
'sonogram-test2.txt',
|
| 501 |
+
key='download-txt2',
|
| 502 |
+
on_click="ignore",
|
| 503 |
+
)'''
|
| 504 |
+
|
| 505 |
currFile = st.sidebar.selectbox('Current File', file_names,on_change=updateMultiSelect,key="select_currFile")
|
| 506 |
+
|
| 507 |
if currFile is None and len(st.session_state.results) > 0 and len(st.session_state.results[0]) > 0:
|
| 508 |
st.write("Select a file to view from the sidebar")
|
| 509 |
try:
|
| 510 |
st.session_state.resetResult = False
|
| 511 |
currFileIndex = file_names.index(currFile)
|
| 512 |
+
currPlainName = currFile.split('.')[0]
|
| 513 |
if len(st.session_state.results) > currFileIndex and len(st.session_state.summaries) > currFileIndex and len(st.session_state.results[currFileIndex]) > 0:
|
| 514 |
+
st.header(f"Analysis of file {currFile}")
|
| 515 |
+
graphNames = ["Data","Voice Categories","Speaker Percentage","Speakers with Categories","Treemap","Timeline","Time Spoken"]
|
| 516 |
+
dataTab, pie1, pie2, sunburst1, treemap1, timeline, bar1 = st.tabs(graphNames)
|
| 517 |
# Handle
|
| 518 |
currAnnotation, currTotalTime = st.session_state.results[currFileIndex]
|
| 519 |
speakerNames = currAnnotation.labels()
|
| 520 |
+
|
| 521 |
+
speakers_dataFrame = st.session_state.summaries[currFileIndex]["speakers_dataFrame"]
|
| 522 |
+
currDF, _ = su.annotationToSimpleDataFrame(currAnnotation)
|
| 523 |
+
speakers_times = st.session_state.summaries[currFileIndex]["speakers_times"]
|
| 524 |
+
|
| 525 |
# Update other categories
|
| 526 |
unusedSpeakers = st.session_state.unusedSpeakers[currFileIndex]
|
| 527 |
categorySelections = st.session_state["categorySelect"][currFileIndex]
|
|
|
|
| 539 |
|
| 540 |
newCategory = st.sidebar.text_input('Add category', key='categoryInput',on_change=addCategory)
|
| 541 |
|
| 542 |
+
catTypeColors = su.colorsCSS(3)
|
| 543 |
+
allColors = su.colorsCSS(len(speakerNames)+len(st.session_state.categories))
|
| 544 |
+
speakerColors = allColors[:len(speakerNames)]
|
| 545 |
+
catColors = allColors[len(speakerNames):]
|
| 546 |
+
|
| 547 |
df4_dict = {}
|
| 548 |
nameList = st.session_state.categories
|
| 549 |
extraNames = []
|
|
|
|
| 565 |
df4 = pd.DataFrame(data=df4_dict)
|
| 566 |
df4.name = "df4"
|
| 567 |
st.session_state.summaries[currFileIndex]["df4"] = df4
|
| 568 |
+
|
| 569 |
+
with dataTab:
|
| 570 |
+
csv = convert_df(currDF)
|
| 571 |
+
|
| 572 |
+
st.download_button(
|
| 573 |
+
"Press to Download analysis data",
|
| 574 |
+
csv,
|
| 575 |
+
'sonogram-analysis-'+currPlainName+'.csv',
|
| 576 |
+
"text/csv",
|
| 577 |
+
key='download-csv',
|
| 578 |
+
on_click="ignore",
|
| 579 |
+
)
|
| 580 |
+
st.dataframe(currDF)
|
| 581 |
+
with pie1:
|
| 582 |
+
printV("In Pie1",4)
|
| 583 |
+
df3 = st.session_state.summaries[currFileIndex]["df3"]
|
| 584 |
+
fig1 = go.Figure()
|
| 585 |
+
fig1.update_layout(
|
| 586 |
+
title_text="Percentage of each Voice Category",
|
| 587 |
+
colorway=catTypeColors,
|
| 588 |
+
plot_bgcolor='rgba(0, 0, 0, 0)',
|
| 589 |
+
paper_bgcolor='rgba(0, 0, 0, 0)',
|
| 590 |
+
)
|
| 591 |
+
printV("Pie1 Pretrace",4)
|
| 592 |
+
fig1.add_trace(go.Pie(values=df3["values"],labels=df3["names"],sort=False))
|
| 593 |
+
printV("Pie1 Posttrace",4)
|
| 594 |
+
|
| 595 |
+
col1_1, col1_2 = st.columns(2)
|
| 596 |
+
fig1.write_image("ascn_pie1.pdf")
|
| 597 |
+
fig1.write_image("ascn_pie1.svg")
|
| 598 |
+
printV("Pie1 files written",4)
|
| 599 |
+
with col1_1:
|
| 600 |
+
printV("Pie1 in col1_1",4)
|
| 601 |
+
with open('ascn_pie1.pdf','rb') as f:
|
| 602 |
+
printV("Pie1 in file open",4)
|
| 603 |
+
st.download_button(
|
| 604 |
+
"Save As PDF",
|
| 605 |
+
f,
|
| 606 |
+
'sonogram-voice-category-'+currPlainName+'.pdf',
|
| 607 |
+
'application/pdf',
|
| 608 |
+
key='download-pdf1',
|
| 609 |
+
on_click="ignore",
|
| 610 |
+
)
|
| 611 |
+
printV("Pie1 after col1_1",4)
|
| 612 |
+
with col1_2:
|
| 613 |
+
with open('ascn_pie1.svg','rb') as f:
|
| 614 |
+
st.download_button(
|
| 615 |
+
"Save As SVG",
|
| 616 |
+
f,
|
| 617 |
+
'sonogram-voice-category-'+currPlainName+'.svg',
|
| 618 |
+
'image/svg+xml',
|
| 619 |
+
key='download-svg1',
|
| 620 |
+
on_click="ignore",
|
| 621 |
+
)
|
| 622 |
+
printV("Pie1 in col1_2",4)
|
| 623 |
+
st.plotly_chart(fig1, use_container_width=True,config=config)
|
| 624 |
+
printV("Pie1 post plotly",4)
|
| 625 |
+
|
| 626 |
+
with pie2:
|
| 627 |
+
df4 = st.session_state.summaries[currFileIndex]["df4"]
|
| 628 |
+
|
| 629 |
+
# Some speakers may be missing, so fix colors
|
| 630 |
+
figColors = []
|
| 631 |
+
for n in df4["names"]:
|
| 632 |
+
if n in speakerNames:
|
| 633 |
+
figColors.append(speakerColors[speakerNames.index(n)])
|
| 634 |
+
fig2 = go.Figure()
|
| 635 |
+
fig2.update_layout(
|
| 636 |
+
title_text="Percentage of Speakers and Custom Categories",
|
| 637 |
+
colorway=catColors+figColors,
|
| 638 |
+
plot_bgcolor='rgba(0, 0, 0, 0)',
|
| 639 |
+
paper_bgcolor='rgba(0, 0, 0, 0)',
|
| 640 |
+
)
|
| 641 |
+
fig2.add_trace(go.Pie(values=df4["values"],labels=df4["names"],sort=False))
|
| 642 |
+
|
| 643 |
+
col2_1, col2_2 = st.columns(2)
|
| 644 |
+
fig2.write_image("ascn_pie2.pdf")
|
| 645 |
+
fig2.write_image("ascn_pie2.svg")
|
| 646 |
+
with col2_1:
|
| 647 |
+
with open('ascn_pie2.pdf','rb') as f:
|
| 648 |
+
st.download_button(
|
| 649 |
+
"Save As PDF",
|
| 650 |
+
f,
|
| 651 |
+
'sonogram-speaker-percent-'+currPlainName+'.pdf',
|
| 652 |
+
'application/pdf',
|
| 653 |
+
key='download-pdf2',
|
| 654 |
+
on_click="ignore",
|
| 655 |
+
)
|
| 656 |
+
with col2_2:
|
| 657 |
+
with open('ascn_pie2.svg','rb') as f:
|
| 658 |
+
st.download_button(
|
| 659 |
+
"Save As SVG",
|
| 660 |
+
f,
|
| 661 |
+
'sonogram-speaker-percent-'+currPlainName+'.svg',
|
| 662 |
+
'image/svg+xml',
|
| 663 |
+
key='download-svg2',
|
| 664 |
+
on_click="ignore",
|
| 665 |
+
)
|
| 666 |
+
st.plotly_chart(fig2, use_container_width=True,config=config)
|
| 667 |
+
|
| 668 |
+
with sunburst1:
|
| 669 |
+
df5 = st.session_state.summaries[currFileIndex]["df5"]
|
| 670 |
+
fig3_1 = px.sunburst(df5,
|
| 671 |
+
branchvalues = 'total',
|
| 672 |
+
names = "labels",
|
| 673 |
+
ids = "ids",
|
| 674 |
+
parents = "parents",
|
| 675 |
+
values = "percentiles",
|
| 676 |
+
custom_data=['labels','valueStrings','percentiles','parentNames','parentPercentiles'],
|
| 677 |
+
color = 'labels',
|
| 678 |
+
title="Percentage of each Voice Category with Speakers",
|
| 679 |
+
color_discrete_sequence=catTypeColors+speakerColors,
|
| 680 |
+
)
|
| 681 |
+
fig3_1.update_traces(
|
| 682 |
+
hovertemplate="<br>".join([
|
| 683 |
+
'<b>%{customdata[0]}</b>',
|
| 684 |
+
'Duration: %{customdata[1]}s',
|
| 685 |
+
'Percentage of Total: %{customdata[2]:.2f}%',
|
| 686 |
+
'Parent: %{customdata[3]}',
|
| 687 |
+
'Percentage of Parent: %{customdata[4]:.2f}%'
|
| 688 |
+
])
|
| 689 |
+
)
|
| 690 |
+
fig3_1.update_layout(
|
| 691 |
+
plot_bgcolor='rgba(0, 0, 0, 0)',
|
| 692 |
+
paper_bgcolor='rgba(0, 0, 0, 0)',
|
| 693 |
+
)
|
| 694 |
+
|
| 695 |
+
col3_1, col3_2 = st.columns(2)
|
| 696 |
+
fig3_1.write_image("ascn_sunburst.pdf")
|
| 697 |
+
fig3_1.write_image("ascn_sunburst.svg")
|
| 698 |
+
with col3_1:
|
| 699 |
+
with open('ascn_sunburst.pdf','rb') as f:
|
| 700 |
+
st.download_button(
|
| 701 |
+
"Save As PDF",
|
| 702 |
+
f,
|
| 703 |
+
'sonogram-speaker-categories-'+currPlainName+'.pdf',
|
| 704 |
+
'application/pdf',
|
| 705 |
+
key='download-pdf3',
|
| 706 |
+
on_click="ignore",
|
| 707 |
+
)
|
| 708 |
+
with col3_2:
|
| 709 |
+
with open('ascn_sunburst.svg','rb') as f:
|
| 710 |
+
st.download_button(
|
| 711 |
+
"Save As SVG",
|
| 712 |
+
f,
|
| 713 |
+
'sonogram-speaker-categories-'+currPlainName+'.svg',
|
| 714 |
+
'image/svg+xml',
|
| 715 |
+
key='download-svg3',
|
| 716 |
+
on_click="ignore",
|
| 717 |
+
)
|
| 718 |
+
st.plotly_chart(fig3_1, use_container_width=True,config=config)
|
| 719 |
+
|
| 720 |
+
with treemap1:
|
| 721 |
+
df5 = st.session_state.summaries[currFileIndex]["df5"]
|
| 722 |
+
fig3 = px.treemap(df5,
|
| 723 |
+
branchvalues = "total",
|
| 724 |
names = "labels",
|
|
|
|
| 725 |
parents = "parents",
|
| 726 |
+
ids="ids",
|
| 727 |
values = "percentiles",
|
| 728 |
custom_data=['labels','valueStrings','percentiles','parentNames','parentPercentiles'],
|
| 729 |
+
color='labels',
|
| 730 |
+
title="Division of Speakers in each Voice Category",
|
| 731 |
+
color_discrete_sequence=catTypeColors+speakerColors,
|
| 732 |
+
)
|
| 733 |
+
fig3.update_traces(
|
| 734 |
+
hovertemplate="<br>".join([
|
| 735 |
+
'<b>%{customdata[0]}</b>',
|
| 736 |
+
'Duration: %{customdata[1]}s',
|
| 737 |
+
'Percentage of Total: %{customdata[2]:.2f}%',
|
| 738 |
+
'Parent: %{customdata[3]}',
|
| 739 |
+
'Percentage of Parent: %{customdata[4]:.2f}%'
|
| 740 |
+
])
|
| 741 |
+
)
|
| 742 |
+
fig3.update_layout(
|
| 743 |
+
plot_bgcolor='rgba(0, 0, 0, 0)',
|
| 744 |
+
paper_bgcolor='rgba(0, 0, 0, 0)',
|
| 745 |
+
)
|
| 746 |
+
|
| 747 |
+
col4_1, col4_2 = st.columns(2)
|
| 748 |
+
fig3.write_image("ascn_treemap.pdf")
|
| 749 |
+
fig3.write_image("ascn_treemap.svg")
|
| 750 |
+
with col4_1:
|
| 751 |
+
with open('ascn_treemap.pdf','rb') as f:
|
| 752 |
+
st.download_button(
|
| 753 |
+
"Save As PDF",
|
| 754 |
+
f,
|
| 755 |
+
'sonogram-treemap-'+currPlainName+'.pdf',
|
| 756 |
+
'application/pdf',
|
| 757 |
+
key='download-pdf4',
|
| 758 |
+
on_click="ignore",
|
| 759 |
+
)
|
| 760 |
+
with col4_2:
|
| 761 |
+
with open('ascn_treemap.svg','rb') as f:
|
| 762 |
+
st.download_button(
|
| 763 |
+
"Save As SVG",
|
| 764 |
+
f,
|
| 765 |
+
'sonogram-treemap-'+currPlainName+'.svg',
|
| 766 |
+
'image/svg+xml',
|
| 767 |
+
key='download-svg4',
|
| 768 |
+
on_click="ignore",
|
| 769 |
+
)
|
| 770 |
+
st.plotly_chart(fig3, use_container_width=True,config=config)
|
| 771 |
+
|
| 772 |
+
# generate plotting window
|
| 773 |
+
|
| 774 |
+
|
| 775 |
+
with timeline:
|
| 776 |
+
fig_la = px.timeline(speakers_dataFrame, x_start="Start", x_end="Finish", y="Resource", color="Resource",title="Timeline of Audio with Speakers",
|
| 777 |
+
color_discrete_sequence=speakerColors)
|
| 778 |
+
fig_la.update_yaxes(autorange="reversed")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 779 |
|
| 780 |
+
hMax = int(currTotalTime//3600)
|
| 781 |
+
mMax = int(currTotalTime%3600//60)
|
| 782 |
+
sMax = int(currTotalTime%60)
|
| 783 |
+
msMax = int(currTotalTime*1000000%1000000)
|
| 784 |
+
timeMax = dt.time(hMax,mMax,sMax,msMax)
|
| 785 |
+
|
| 786 |
+
fig_la.update_layout(
|
| 787 |
+
xaxis_tickformatstops = [
|
| 788 |
+
dict(dtickrange=[None, 1000], value="%H:%M:%S.%L"),
|
| 789 |
+
dict(dtickrange=[1000, None], value="%H:%M:%S")
|
| 790 |
+
],
|
| 791 |
+
xaxis=dict(
|
| 792 |
+
range=[dt.datetime.combine(dt.date.today(), dt.time.min),dt.datetime.combine(dt.date.today(), timeMax)]
|
| 793 |
+
),
|
| 794 |
+
xaxis_title="Time",
|
| 795 |
+
yaxis_title="Speaker",
|
| 796 |
+
legend_title=None,
|
| 797 |
+
plot_bgcolor='rgba(0, 0, 0, 0)',
|
| 798 |
+
paper_bgcolor='rgba(0, 0, 0, 0)',
|
| 799 |
+
legend={'traceorder':'reversed'},
|
| 800 |
+
yaxis= {'showticklabels': False},
|
| 801 |
+
)
|
| 802 |
+
|
| 803 |
+
col5_1, col5_2 = st.columns(2)
|
| 804 |
+
fig_la.write_image("ascn_timeline.pdf")
|
| 805 |
+
fig_la.write_image("ascn_timeline.svg")
|
| 806 |
+
with col5_1:
|
| 807 |
+
with open('ascn_timeline.pdf','rb') as f:
|
| 808 |
+
st.download_button(
|
| 809 |
+
"Save As PDF",
|
| 810 |
+
f,
|
| 811 |
+
'sonogram-timeline-'+currPlainName+'.pdf',
|
| 812 |
+
'application/pdf',
|
| 813 |
+
key='download-pdf5',
|
| 814 |
+
on_click="ignore",
|
| 815 |
+
)
|
| 816 |
+
with col5_2:
|
| 817 |
+
with open('ascn_timeline.svg','rb') as f:
|
| 818 |
+
st.download_button(
|
| 819 |
+
"Save As SVG",
|
| 820 |
+
f,
|
| 821 |
+
'sonogram-timeline-'+currPlainName+'.svg',
|
| 822 |
+
'image/svg+xml',
|
| 823 |
+
key='download-svg5',
|
| 824 |
+
on_click="ignore",
|
| 825 |
+
)
|
| 826 |
+
st.plotly_chart(fig_la, use_container_width=True,config=config)
|
| 827 |
+
|
| 828 |
+
with bar1:
|
| 829 |
+
df2 = st.session_state.summaries[currFileIndex]["df2"]
|
| 830 |
+
fig2_la = px.bar(df2, x="values", y="names", color="names", orientation='h',
|
| 831 |
+
custom_data=["names","values"],title="Time Spoken by each Speaker",
|
| 832 |
+
color_discrete_sequence=catColors+speakerColors)
|
| 833 |
+
fig2_la.update_xaxes(ticksuffix="%")
|
| 834 |
+
fig2_la.update_yaxes(autorange="reversed")
|
| 835 |
+
fig2_la.update_layout(
|
| 836 |
+
xaxis_title="Percentage Time Spoken",
|
| 837 |
+
yaxis_title="Speaker",
|
| 838 |
+
legend_title=None,
|
| 839 |
+
plot_bgcolor='rgba(0, 0, 0, 0)',
|
| 840 |
+
paper_bgcolor='rgba(0, 0, 0, 0)',
|
| 841 |
+
legend={'traceorder':'reversed'},
|
| 842 |
+
yaxis= {'showticklabels': False},
|
| 843 |
+
)
|
| 844 |
+
fig2_la.update_traces(
|
| 845 |
+
hovertemplate="<br>".join([
|
| 846 |
+
'<b>%{customdata[0]}</b>',
|
| 847 |
+
'Percentage of Time: %{customdata[1]:.2f}%'
|
| 848 |
+
])
|
| 849 |
+
)
|
| 850 |
+
|
| 851 |
+
col6_1, col6_2 = st.columns(2)
|
| 852 |
+
# Fix: export the speaker-time bar chart (fig2_la). fig_la is the
# timeline figure and was being written to the bar-chart files by
# mistake, so "Save As PDF/SVG" on this tab downloaded the wrong chart.
fig2_la.write_image("ascn_bar.pdf")
fig2_la.write_image("ascn_bar.svg")
|
| 854 |
+
with col6_1:
|
| 855 |
+
with open('ascn_bar.pdf','rb') as f:
|
| 856 |
+
st.download_button(
|
| 857 |
+
"Save As PDF",
|
| 858 |
+
f,
|
| 859 |
+
'sonogram-speaker-time-'+currPlainName+'.pdf',
|
| 860 |
+
'application/pdf',
|
| 861 |
+
key='download-pdf6',
|
| 862 |
+
on_click="ignore",
|
| 863 |
+
)
|
| 864 |
+
with col6_2:
|
| 865 |
+
with open('ascn_bar.svg','rb') as f:
|
| 866 |
+
st.download_button(
|
| 867 |
+
"Save As SVG",
|
| 868 |
+
f,
|
| 869 |
+
'sonogram-speaker-time-'+currPlainName+'.svg',
|
| 870 |
+
'image/svg+xml',
|
| 871 |
+
key='download-svg6',
|
| 872 |
+
on_click="ignore",
|
| 873 |
+
)
|
| 874 |
+
st.plotly_chart(fig2_la, use_container_width=True,config=config)
|
| 875 |
|
| 876 |
except ValueError:
|
| 877 |
pass
|
| 878 |
|
| 879 |
if len(st.session_state.results) > 0:
|
| 880 |
+
with st.expander("Multi-file Summary Data"):
|
| 881 |
+
st.header("Multi-file Summary Data")
|
| 882 |
+
with st.spinner(text='Processing summary results...'):
|
| 883 |
+
fileNames = st.session_state.file_names
|
| 884 |
+
results = []
|
| 885 |
+
indices = []
|
| 886 |
+
for i, resultTuple in enumerate(st.session_state.results):
|
| 887 |
+
if len(resultTuple) == 2:
|
| 888 |
+
results.append(resultTuple)
|
| 889 |
+
indices.append(i)
|
| 890 |
+
if len(indices) > 1:
|
| 891 |
+
|
| 892 |
+
df6_dict = {
|
| 893 |
+
"files":fileNames,
|
| 894 |
+
}
|
| 895 |
+
allCategories = copy.deepcopy(st.session_state.categories)
|
| 896 |
+
for i in indices:
|
| 897 |
+
currAnnotation, currTotalTime = st.session_state.results[i]
|
| 898 |
+
categorySelections = st.session_state["categorySelect"][i]
|
| 899 |
+
catSummary,extraCats = su.calcCategories(currAnnotation,categorySelections)
|
| 900 |
+
st.session_state.summaries[i]["categories"] = (catSummary,extraCats)
|
| 901 |
+
for extra in extraCats:
|
| 902 |
+
df6_dict[extra] = []
|
| 903 |
+
if extra not in allCategories:
|
| 904 |
+
allCategories.append(extra)
|
| 905 |
+
|
| 906 |
|
| 907 |
+
for category in st.session_state.categories:
|
| 908 |
+
df6_dict[category] = []
|
| 909 |
+
for i in indices:
|
| 910 |
+
summary, extras = st.session_state.summaries[i]["categories"]
|
| 911 |
+
theseCategories = st.session_state.categories + extras
|
| 912 |
+
for j, timeSlots in enumerate(summary):
|
| 913 |
+
df6_dict[theseCategories[j]].append(sum([t.duration for _,t in timeSlots])/st.session_state.results[i][1])
|
| 914 |
+
for category in allCategories:
|
| 915 |
+
if category not in theseCategories:
|
| 916 |
+
df6_dict[category].append(0)
|
| 917 |
+
df6 = pd.DataFrame(df6_dict)
|
| 918 |
+
summFig = px.bar(df6, x="files", y=allCategories,title="Time Spoken by Each Speaker in Each File")
|
| 919 |
+
st.plotly_chart(summFig, use_container_width=True,config=config)
|
| 920 |
+
|
| 921 |
+
|
| 922 |
+
voiceNames = ["No Voice","One Voice","Multi Voice"]
|
| 923 |
+
df7_dict = {
|
| 924 |
+
"files":fileNames,
|
| 925 |
+
}
|
| 926 |
+
for category in voiceNames:
|
| 927 |
+
df7_dict[category] = []
|
| 928 |
+
for resultID,summary in enumerate(st.session_state.summaries):
|
| 929 |
+
partialDf = summary["df5"]
|
| 930 |
+
for i in range(len(voiceNames)):
|
| 931 |
+
df7_dict[voiceNames[i]].append(partialDf["percentiles"][i])
|
| 932 |
+
df7 = pd.DataFrame(df7_dict)
|
| 933 |
+
sorted_df7 = df7.sort_values(by=['One Voice', 'Multi Voice'])
|
| 934 |
+
summFig2 = px.bar(sorted_df7, x="files", y=["One Voice","Multi Voice","No Voice",],title="Cross-file Voice Categories sorted for One Voice")
|
| 935 |
+
st.plotly_chart(summFig2, use_container_width=True,config=config)
|
| 936 |
+
sorted_df7_3 = df7.sort_values(by=['Multi Voice','One Voice'])
|
| 937 |
+
summFig3 = px.bar(sorted_df7_3, x="files", y=["One Voice","Multi Voice","No Voice",],title="Cross-file Voice Categories sorted for Multi Voice")
|
| 938 |
+
st.plotly_chart(summFig3, use_container_width=True,config=config)
|
| 939 |
+
sorted_df7_4 = df7.sort_values(by=['No Voice', 'Multi Voice'],ascending=False)
|
| 940 |
+
summFig4 = px.bar(sorted_df7_4, x="files", y=["One Voice","Multi Voice","No Voice",],title="Cross-file Voice Categories sorted for Any Voice")
|
| 941 |
+
st.plotly_chart(summFig4, use_container_width=True,config=config)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 942 |
|
| 943 |
|
| 944 |
|
|
|
|
| 956 |
def convert_df(df):
    """Serialize a DataFrame to UTF-8 encoded CSV bytes, omitting the index."""
    csv_text = df.to_csv(index=False)
    return csv_text.encode('utf-8')
|
| 958 |
|
| 959 |
+
st.write("Would you like additional data, charts, or features? We would love to hear more from you [about our project!](https://forms.gle/A32CdfGYSZoMPyyX9)")
|
| 960 |
+
st.write("If you would like to learn more or work with us, please contact mtbaillie@ualr.edu")
|
| 961 |
+
|
| 962 |
+
with st.expander("Instructions and additional details"):
|
| 963 |
+
st.write("Thank you for viewing our experimental app! The overall presentations and features are expected to be improved over time, you can think of this as our first rough draft!")
|
| 964 |
+
st.write("To use this app:\n1. Upload an audio file for live analysis. Alternatively, you can upload an already generated [rttm file](https://stackoverflow.com/questions/30975084/rttm-file-format)")
|
| 965 |
+
st.write("2. Press Analyze All. Note that no data is saved on our side, so we will not have access to your recordings. Future versions of this app will support donating audio to us for aid in our research.")
|
| 966 |
+
st.write("3. Use the side bar on the left to select your file (may have to be expanded by clicking the > ). Our app supports uploading multiple files for more comprehensive analysis.")
|
| 967 |
+
st.write("4. Use the tabs provided to view different visualizations of your audio. Each example can be downloaded for personal use.")
|
| 968 |
+
st.write("4a. The graphs are built using [plotly](https://plotly.com/). This allows for a high degree of interaction. Feel free to experiment with the graphs, as you can always return to the original view by double-clicking on the graph. For more examples of easily supported visualizations, see [here](https://plotly.com/python/basic-charts/)")
|
| 969 |
+
|
| 970 |
+
with st.expander("(Potentially) FAQ"):
|
| 971 |
+
st.write(f"**1. I tried analyzing a file, but the page refreshed and nothing happened! Why?**\n\t")
|
| 972 |
+
st.write("You may need to select a file using the side bar on the left. This app supports multiple files, so we require that you select which file to view after analysis.")
|
| 973 |
+
st.write(f"**2. I don't see a sidebar! Where is it?**\n\t")
|
| 974 |
+
st.write("The side bar may start by being minimized. Press the '>' in the upper left to expand the side bar.")
|
| 975 |
+
st.write(f"**3. I still don't have a file to select in the dropdown! Why?**\n\t")
|
| 976 |
+
st.write("If you are sure that you have run Analyze All and after refresh no files may be selected, then your file is likely too large. We currently have a limitation of approximately 1.5 hours of audio. This is a known issue that requires additional time **or** money to solve, and is expected to be fixed by the next update of this app. Please be patient!")
|
| 977 |
+
st.write(f"**4. I want to be able to view my previously analyzed data! How can I do this?**\n\t")
|
| 978 |
+
st.write("You can download a CSV copy of the data using the first tab. From there, you can reupload the CSV copy at a later date to view the data visualizations without having to use your original audio file. Future versions of this app will support creating optional logins for long term storage and analysis.")
|
| 979 |
+
st.write(f"**5. The app says 'TOOL CURRENTLY USING CPU, ANALYSIS EXTREMELY SLOW' and takes forever to analyze audio! What is wrong?**\n\t")
|
| 980 |
+
st.write("We are currently in the process of securing funding to allow permanent public access to this tool. Until then, we can provide an interface to view already analyzed data without cost to you or us. While this mode will technically still work, it may take over a day to analyze your audio. Feel free to reach out to us to discuss temporary solutions to this until the app's funding is secured!")
|
requirements.txt
CHANGED
|
@@ -4,9 +4,10 @@ torch
|
|
| 4 |
#torch_xla[tpu] == 2.6.0
|
| 5 |
torchaudio
|
| 6 |
numpy
|
| 7 |
-
streamlit
|
| 8 |
opencv-python
|
| 9 |
pyannote.audio
|
| 10 |
matplotlib
|
| 11 |
plotly
|
| 12 |
-
deepfilternet
|
|
|
|
|
|
| 4 |
#torch_xla[tpu] == 2.6.0
|
| 5 |
torchaudio
|
| 6 |
numpy
|
| 7 |
+
streamlit == 1.45.1
|
| 8 |
opencv-python
|
| 9 |
pyannote.audio
|
| 10 |
matplotlib
|
| 11 |
plotly
|
| 12 |
+
deepfilternet
|
| 13 |
+
kaleido==0.1.0
|
sonogram_utility.py
CHANGED
|
@@ -25,6 +25,26 @@ def colors(n):
|
|
| 25 |
ret.append((bgr[0][0][0].item()/255,bgr[0][0][1].item()/255,bgr[0][0][2].item()/255))
|
| 26 |
return ret
|
| 27 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
def extendSpeakers(mySpeakerList, fileLabel = 'NONE', maximumSecondDifference = 1, minimumSecondDuration = 0):
|
| 29 |
'''
|
| 30 |
Assumes mySpeakerList is already split into Speaker/Audience
|
|
@@ -100,6 +120,43 @@ def loadAudioRTTM(sampleRTTM):
|
|
| 100 |
|
| 101 |
return speakerList, prediction
|
| 102 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 103 |
def splitIntoTimeSegments(testFile,maxDurationInSeconds=60):
|
| 104 |
|
| 105 |
waveform, sample_rate = torchaudio.load(testFile)
|
|
@@ -282,7 +339,30 @@ def annotationToDataFrame(myAnnotation):
|
|
| 282 |
df = pd.DataFrame(dataList)
|
| 283 |
return df, timeSummary
|
| 284 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 285 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 286 |
|
| 287 |
def calcCategories(myAnnotation,categories):
|
| 288 |
categorySlots = []
|
|
|
|
| 25 |
ret.append((bgr[0][0][0].item()/255,bgr[0][0][1].item()/255,bgr[0][0][2].item()/255))
|
| 26 |
return ret
|
| 27 |
|
| 28 |
+
def colorsCSS(n):
    """
    Create a list of ``n`` visually distinct CSS hex color strings.

    Hues are spread evenly around OpenCV's 0-179 hue circle starting
    from a random offset, with fixed saturation and value of 200.

    Parameters
    ----------
    n : int
        Number of colors to generate.

    Returns
    -------
    list[str]
        ``n`` strings of the form ``'#rrggbb'`` (empty list when n == 0).
    """
    if n == 0:
        return []
    ret = []
    h = int(random.random() * 180)  # random starting hue; OpenCV hue range is 0-179
    step = 180 / n
    for _ in range(n):
        h = int(h + step) % 180
        hsv = np.uint8([[[h, 200, 200]]])
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        b = f'{bgr[0][0][0].item():02x}'
        g = f'{bgr[0][0][1].item():02x}'
        r = f'{bgr[0][0][2].item():02x}'
        # BUG FIX: cv2 returns channels in BGR order, but CSS hex is
        # '#RRGGBB'.  The previous version concatenated '#'+b+g+r,
        # emitting '#BBGGRR' and therefore the wrong hues in the browser.
        ret.append('#' + r + g + b)
    return ret
| 47 |
+
|
| 48 |
def extendSpeakers(mySpeakerList, fileLabel = 'NONE', maximumSecondDifference = 1, minimumSecondDuration = 0):
|
| 49 |
'''
|
| 50 |
Assumes mySpeakerList is already split into Speaker/Audience
|
|
|
|
| 120 |
|
| 121 |
return speakerList, prediction
|
| 122 |
|
| 123 |
+
def loadAudioTXT(sampleTXT):
    """
    Load speaker predictions from a tab-separated text file.

    Each line must contain at least three tab-separated fields:
    start time (seconds), end time (seconds), speaker label.  Lines
    with fewer than three fields are skipped.

    Parameters
    ----------
    sampleTXT : str
        Path to the tab-separated prediction file.

    Returns
    -------
    (list, Annotation)
        An empty list (kept to mirror loadAudioRTTM's
        ``(speakerList, prediction)`` interface) and the parsed
        pyannote Annotation.
    """
    # Annotation form, for convenient error rate calculation.
    prediction = Annotation(uri=sampleTXT)
    with open(sampleTXT, "r") as txt:
        for line in txt:
            # BUG FIX: strip the line ending before splitting.  Previously
            # the trailing '\n' stayed attached to the speaker label, so
            # the same speaker produced two distinct labels ("A\n" on most
            # lines, "A" on the file's last line).  Also drops a leftover
            # debug print and unused index/duration locals.
            fields = line.rstrip('\n').split('\t')
            if len(fields) < 3:
                continue
            start = float(fields[0])
            end = float(fields[1])
            prediction[Segment(start, end)] = fields[2].strip()

    return [], prediction
|
| 142 |
+
|
| 143 |
+
def loadAudioCSV(sampleCSV):
    """
    Rebuild a pyannote Annotation from a previously exported CSV file.

    The CSV is expected to contain the columns ``Start``, ``Finish``
    and ``Resource`` (speaker label), as written by the app's CSV
    export.  Returns ``([], prediction)`` to mirror loadAudioRTTM's
    ``(speakerList, prediction)`` interface.
    """
    # Read in prediction data.
    frame = pd.read_csv(sampleCSV)
    frame = frame.reset_index()  # make sure indexes pair with number of rows

    # Annotation form, for convenient error rate calculation.
    prediction = Annotation(uri=sampleCSV)
    for _, record in frame.iterrows():
        segment = Segment(record['Start'], record['Finish'])
        prediction[segment] = record['Resource']

    return [], prediction
|
| 159 |
+
|
| 160 |
def splitIntoTimeSegments(testFile,maxDurationInSeconds=60):
|
| 161 |
|
| 162 |
waveform, sample_rate = torchaudio.load(testFile)
|
|
|
|
| 339 |
df = pd.DataFrame(dataList)
|
| 340 |
return df, timeSummary
|
| 341 |
|
| 342 |
+
def annotationToSimpleDataFrame(myAnnotation):
    """
    Flatten an annotation into a plotting-friendly DataFrame plus totals.

    Parameters
    ----------
    myAnnotation : Annotation
        Annotation exposing ``labels()``, ``subset([label])`` and
        segment objects with ``start``, ``end`` and ``duration``.

    Returns
    -------
    (pandas.DataFrame, dict)
        DataFrame with one row per speaking segment and columns
        ``Task`` ("<speaker>.<index>"), ``Start``, ``Finish`` and
        ``Resource`` (speaker label); and a dict mapping each speaker
        label to its total speaking time in seconds.
    """
    # Collect each speaker's segments in label order.
    segmentsBySpeaker = {
        speaker: list(myAnnotation.subset([speaker]).itersegments())
        for speaker in myAnnotation.labels()
    }

    # Total speaking duration per speaker, in seconds.
    timeSummary = {
        speaker: sum(seg.duration for seg in segments)
        for speaker, segments in segmentsBySpeaker.items()
    }

    rows = [
        dict(Task=f"{speaker}.{k}", Start=seg.start,
             Finish=seg.end, Resource=speaker)
        for speaker, segments in segmentsBySpeaker.items()
        for k, seg in enumerate(segments)
    ]
    return pd.DataFrame(rows), timeSummary
|
| 366 |
|
| 367 |
def calcCategories(myAnnotation,categories):
|
| 368 |
categorySlots = []
|