Spaces:
Sleeping
Sleeping
| import streamlit as st | |
| import matplotlib.pyplot as plt | |
| import numpy as np | |
| import torchaudio | |
| import sonogram_utility as su | |
| import time | |
| import ParquetScheduler as ps | |
| from pathlib import Path | |
| from typing import Any, Dict, List, Optional, Union | |
| import copy | |
| import datetime | |
| import tempfile | |
| import os | |
| import shutil | |
| import pandas as pd | |
| import plotly.express as px | |
| import plotly.graph_objects as go | |
| from plotly.subplots import make_subplots | |
| import torch | |
| #import torch_xla.core.xla_model as xm | |
| from pyannote.audio import Pipeline | |
| from pyannote.core import Annotation, Segment, Timeline | |
| from df.enhance import enhance, init_df | |
| import datetime as dt | |
| enableDenoise = False | |
| earlyCleanup = True | |
| # [None,Low,Medium,High,Debug] | |
| # [0,1,2,3,4] | |
| verbosity=4 | |
| config = { | |
| 'displayModeBar': True, | |
| 'modeBarButtonsToRemove':[], | |
| } | |
| def printV(message,verbosityLevel): | |
| global verbosity | |
| if verbosity>=verbosityLevel: | |
| print(message) | |
def convert_df(df):
    """Serialize a DataFrame to UTF-8 CSV bytes (no index column).

    Used as the payload for st.download_button, which expects bytes.
    """
    csv_text = df.to_csv(index=False)
    return csv_text.encode('utf-8')
def save_data(
    config_dict: Dict[str,str], audio_paths: List[str], userid: str,
) -> None:
    """Save data, i.e. move audio to a new folder and send paths+config to scheduler."""
    # Per-user directory under the parquet dataset root (module-level constant).
    save_dir = PARQUET_DATASET_DIR / f"{userid}"
    save_dir.mkdir(parents=True, exist_ok=True)
    # Deep-copy so the caller's dict is not mutated by the keys added below.
    data = copy.deepcopy(config_dict)
    # Add timestamp
    # NOTE(review): datetime.utcnow() is naive (no tzinfo) and deprecated since
    # Python 3.12 — datetime.now(datetime.timezone.utc) is the modern form, but
    # it changes the ISO string (adds "+00:00"); confirm consumers first.
    data["timestamp"] = datetime.datetime.utcnow().isoformat()
    # Copy and add audio
    for i,p in enumerate(audio_paths):
        # Zero-padded ordinal keeps the copied files lexically sorted.
        name = f"{i:03d}"
        dst_path = save_dir / f"{name}{Path(p).suffix}"
        shutil.copyfile(p, dst_path)
        # NOTE(review): dst_path is a pathlib.Path object — verify that
        # ParquetScheduler.append accepts Path values (str(dst_path) may be safer).
        data[f"audio_{name}"] = dst_path
    # Send to scheduler
    scheduler.append(data)
def processFile(filePath):
    """Run the full audio pipeline on one file: load, optionally denoise,
    equalize volume, then diarize speakers.

    Parameters:
        filePath: path to an audio file readable by sonogram_utility.

    Returns:
        (annotations, totalTimeInSeconds) where annotations is the pyannote
        diarization result and totalTimeInSeconds is the audio length.

    Uses module-level settings (enableDenoise, earlyCleanup, gainWindow,
    minimumGain, maximumGain, attenLimDB) and the loaded dfModel/pipeline.
    The original declared `global attenLimDb` — a misspelling of the actual
    module constant attenLimDB; reads never need `global`, so the (broken)
    declarations were removed.
    """
    print("Loading file")
    # Split into <=600 s segments so per-segment denoising fits in memory.
    waveformList, sampleRate = su.splitIntoTimeSegments(filePath, 600)
    print("File loaded")
    if enableDenoise:
        print("Denoising")
        enhancedWaveformList = []
        for w in waveformList:
            newW = enhance(dfModel, dfState, w, atten_lim_db=attenLimDB).detach().cpu()
            enhancedWaveformList.append(newW)
        print("Audio denoised")
    else:
        # No denoising: pass the segments straight through.
        enhancedWaveformList = list(waveformList)
    waveformEnhanced = su.combineWaveforms(enhancedWaveformList)
    if earlyCleanup:
        del enhancedWaveformList
    print("Equalizing Audio")
    # NOTE: su.equalizeVolume() appears to be a factory returning a callable —
    # confirmed only by this call shape.
    waveform_gain_adjusted = su.equalizeVolume()(waveformEnhanced, sampleRate, gainWindow, minimumGain, maximumGain)
    if earlyCleanup:
        del waveformEnhanced
    print("Audio Equalized")
    print("Detecting speakers")
    annotations = pipeline({"waveform": waveform_gain_adjusted, "sample_rate": sampleRate})
    print("Speakers Detected")
    totalTimeInSeconds = int(waveform_gain_adjusted.shape[-1]/sampleRate)
    print("Time in seconds calculated")
    return annotations, totalTimeInSeconds
def addCategory():
    """on_change callback for the 'Add category' text input: register the
    typed name as a new speaker category and clear the input box."""
    name = st.session_state.categoryInput
    st.toast(f"Adding {name}")
    # Fresh, empty multiselect widget state for the new category.
    st.session_state[f'multiselect_{name}'] = []
    st.session_state.categories.append(name)
    # Reset the text box for the next entry.
    st.session_state.categoryInput = ''
    # Every file's per-category selection list grows by one empty slot.
    for selections in st.session_state.categorySelect:
        selections.append([])
def removeCategory(index):
    """on_click callback for a 'Remove <category>' button: delete the
    category at `index` and all widget/selection state tied to it."""
    name = st.session_state.categories[index]
    st.toast(f"Removing {name}")
    # Drop the widget state keyed by this category's name.
    del st.session_state[f'multiselect_{name}']
    del st.session_state[f'remove_{name}']
    del st.session_state.categories[index]
    # Drop the matching selection slot for every file.
    for selections in st.session_state.categorySelect:
        del selections[index]
def updateCategoryOptions(resultIndex):
    """on_change callback for a category multiselect: persist the widget's
    current choices for file `resultIndex` and recompute which speakers are
    still unassigned to any category.
    """
    if st.session_state.resetResult:
        # A file switch is in progress and the widgets are being repopulated;
        # their transient values must not be written back.
        return
    # BUG FIX: results entries are (annotation, totalTime) 2-tuples everywhere
    # they are written (and read elsewhere); the original 3-value unpack
    # `_, currAnnotation, _ = ...` raised ValueError on every invocation.
    # NOTE(review): this reads the module-level currFileIndex rather than the
    # resultIndex argument — looks intentional-but-fragile; confirm.
    currAnnotation, _ = st.session_state.results[currFileIndex]
    speakerNames = currAnnotation.labels()
    # Handle speaker category sidebars
    unusedSpeakers = copy.deepcopy(speakerNames)
    # Remove every speaker already assigned to some category.
    for i, category in enumerate(st.session_state['categories']):
        category_choices = copy.deepcopy(st.session_state[f'multiselect_{category}'])
        st.session_state["categorySelect"][resultIndex][i] = category_choices
        for sp in category_choices:
            try:
                unusedSpeakers.remove(sp)
            except ValueError:
                # Speaker not present (e.g. selection carried over from
                # another file); narrow except replaces the original bare one.
                continue
    st.session_state.unusedSpeakers[resultIndex] = unusedSpeakers
def updateMultiSelect():
    """on_change callback for the file selector: reload every category
    multiselect from the selections stored for the newly chosen file."""
    idx = file_names.index(st.session_state["select_currFile"])
    # Block updateCategoryOptions from writing back while widgets repopulate.
    st.session_state.resetResult = True
    for pos, category in enumerate(st.session_state['categories']):
        st.session_state[f'multiselect_{category}'] = st.session_state['categorySelect'][idx][pos]
def analyze(inFileName):
    """Build/refresh the cached summary DataFrames for one file.

    Looks up `inFileName` in the module-level file_names list, reads its
    (annotation, totalTime) tuple from st.session_state.results, and stores
    df2/df3/df4/df5 plus the speaker timeline frames in
    st.session_state.summaries[index]. Returns None; a ValueError (e.g. the
    file is not in file_names) is logged and swallowed.
    """
    try:
        print(f"Start analyzing {inFileName}")
        st.session_state.resetResult = False
        currFileIndex = file_names.index(inFileName)
        print(f"Found at index {currFileIndex}")
        # Only proceed when diarization results for this file already exist.
        if len(st.session_state.results) > currFileIndex and len(st.session_state.summaries) > currFileIndex and len(st.session_state.results[currFileIndex]) > 0:
            printV(f'In if',4)
            currAnnotation, currTotalTime = st.session_state.results[currFileIndex]
            speakerNames = currAnnotation.labels()
            printV(f'Loaded results',4)
            unusedSpeakers = st.session_state.unusedSpeakers[currFileIndex]
            categorySelections = st.session_state["categorySelect"][currFileIndex]
            printV(f'Loaded speaker selections',4)
            # df3: seconds with no / one / multiple simultaneous voices.
            noVoice, oneVoice, multiVoice = su.calcSpeakingTypes(currAnnotation,currTotalTime)
            sumNoVoice = su.sumTimes(noVoice)
            sumOneVoice = su.sumTimes(oneVoice)
            sumMultiVoice = su.sumTimes(multiVoice)
            printV(f'Calculated speaking types',4)
            df3 = pd.DataFrame(
                {
                    "values": [sumNoVoice,
                        sumOneVoice,
                        sumMultiVoice],
                    "names": ["No Voice","One Voice","Multi Voice"],
                }
            )
            df3.name = "df3"
            st.session_state.summaries[currFileIndex]["df3"] = df3
            printV(f'Set df3',4)
            # df4: time per custom category, plus each uncategorized speaker.
            nameList = st.session_state.categories
            extraNames = []
            valueList = [0 for i in range(len(nameList))]
            extraValues = []
            for sp in speakerNames:
                foundSp = False
                for i, categoryName in enumerate(nameList):
                    if sp in categorySelections[i]:
                        valueList[i] += su.sumTimes(currAnnotation.subset([sp]))
                        foundSp = True
                        break
                if not foundSp:
                    extraNames.append(sp)
                    extraValues.append(su.sumTimes(currAnnotation.subset([sp])))
            extraPairsSorted = sorted(zip(extraNames, extraValues), key=lambda pair: pair[0])
            # BUG FIX: when every speaker is categorized the extras are empty,
            # and `zip(*[])` raised ValueError — silently aborting (via the
            # except below) before df4/df5/df2 were ever built.
            if extraPairsSorted:
                extraNames, extraValues = zip(*extraPairsSorted)
            df4_dict = {
                "values": valueList+list(extraValues),
                "names": nameList+list(extraNames),
            }
            df4 = pd.DataFrame(data=df4_dict)
            df4.name = "df4"
            st.session_state.summaries[currFileIndex]["df4"] = df4
            printV(f'Set df4',4)
            # df5: sunburst/treemap hierarchy — three root voice categories,
            # with per-speaker children under One Voice and Multi Voice.
            speakerList,timeList = su.sumTimesPerSpeaker(oneVoice)
            multiSpeakerList, multiTimeList = su.sumMultiTimesPerSpeaker(multiVoice)
            summativeMultiSpeaker = sum(multiTimeList)
            basePercentiles = [sumNoVoice/currTotalTime,
                sumOneVoice/currTotalTime,
                sumMultiVoice/currTotalTime
            ]
            df5 = pd.DataFrame(
                {
                    "ids" : ["NV","OV","MV"]+[f"OV_{i}" for i in range(len(speakerList))]
                        +[f"MV_{i}" for i in range(len(multiSpeakerList))],
                    "labels" : ["No Voice","One Voice","Multi Voice"] + speakerList + multiSpeakerList,
                    "parents" : ["","",""]+["OV" for i in range(len(speakerList))]
                        +["MV" for i in range(len(multiSpeakerList))],
                    "parentNames" : ["Total","Total","Total"]+["One Voice" for i in range(len(speakerList))]
                        +["Multi Voice" for i in range(len(multiSpeakerList))],
                    "values" : [sumNoVoice,
                        sumOneVoice,
                        sumMultiVoice,
                    ] + timeList + multiTimeList,
                    "valueStrings" : [su.timeToString(sumNoVoice),
                        su.timeToString(sumOneVoice),
                        su.timeToString(sumMultiVoice),
                    ] + su.timeToString(timeList) + su.timeToString(multiTimeList),
                    # Child slices are scaled so all values are percent-of-total.
                    "percentiles" : [basePercentiles[0]*100,
                        basePercentiles[1]*100,
                        basePercentiles[2]*100] +
                        [(t*100) / sumOneVoice * basePercentiles[1] for t in timeList] +
                        [(t*100) / summativeMultiSpeaker * basePercentiles[2] for t in multiTimeList],
                    "parentPercentiles" : [basePercentiles[0]*100,
                        basePercentiles[1]*100,
                        basePercentiles[2]*100] +
                        [(t*100) / sumOneVoice for t in timeList] +
                        [(t*100) / summativeMultiSpeaker for t in multiTimeList],
                }
            )
            df5.name = "df5"
            st.session_state.summaries[currFileIndex]["df5"] = df5
            printV(f'Set df5',4)
            speakers_dataFrame,speakers_times = su.annotationToDataFrame(currAnnotation)
            st.session_state.summaries[currFileIndex]["speakers_dataFrame"] = speakers_dataFrame
            st.session_state.summaries[currFileIndex]["speakers_times"] = speakers_times
            # df2: df4 rescaled to percent of total recording time.
            df2_dict = {
                "values":[100*t/currTotalTime for t in df4_dict["values"]],
                "names":df4_dict["names"]
            }
            df2 = pd.DataFrame(df2_dict)
            st.session_state.summaries[currFileIndex]["df2"] = df2
            printV(f'Set df2',4)
    except ValueError as e:
        print(f"Value Error: {e}")
#----------------------------------------------------------------------------------------------------------------------
# Module-level setup: Streamlit/torch watcher workaround, dataset directory,
# scheduler, audio constants, device selection, and model loading.

# Workaround: Streamlit's file watcher chokes on torch.classes' unusual
# __path__; give it a concrete list so the app can start.
torch.classes.__path__ = [os.path.join(torch.__path__[0], torch.classes.__file__)]
PARQUET_DATASET_DIR = Path("parquet_dataset")
PARQUET_DATASET_DIR.mkdir(parents=True,exist_ok=True)
sample_data = [f"CHEM1402_gt/24F_CHEM1402_Night_Class_Week_{i}_gt.rttm" for i in range(1,11)]
scheduler = ps.ParquetScheduler(repo_id="Sonogram/SampleDataset")
secondDifference = 5
# Volume-equalization parameters consumed by processFile (units per
# sonogram_utility; gains presumably in dB — confirm against su.equalizeVolume).
gainWindow = 4
minimumGain = -45
maximumGain = -5
# Denoiser attenuation limit passed to df.enhance as atten_lim_db.
attenLimDB = 3
isGPU = False
try:
    # TPU path is disabled: raise immediately so control falls through to the
    # CUDA/CPU fallback in the except clause below.
    raise(RuntimeError("Not an error"))
    #device = xm.xla_device()
    print("TPU is available.")
    isGPU = True
except RuntimeError as e:
    print(f"TPU is not available: {e}")
    # Fallback to CPU or other devices if needed
    isGPU = torch.cuda.is_available()
    device = torch.device("cuda" if isGPU else "cpu")
    print(f"Using {device} instead.")
#device = xm.xla_device()
if (enableDenoise):
    # Instantiate and prepare model for training.
    dfModel, dfState, _ = init_df(model_base_dir="DeepFilterNet3")
    dfModel.to(device)#torch.device("cuda"))
pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1")
pipeline.to(device)#torch.device("cuda"))
# Store results for viewing and further processing
# Long-range usage
# results[i] = (annotation, totalSeconds) for file i; summaries[i] caches the
# derived DataFrames (df2..df5, speaker frames) for file i.
if 'results' not in st.session_state:
    st.session_state.results = []
if 'summaries' not in st.session_state:
    st.session_state.summaries = []
if 'categories' not in st.session_state:
    # categorySelect[file][category] -> list of speaker labels; only initialized
    # together with categories so the two stay index-aligned.
    st.session_state.categories = []
    st.session_state.categorySelect = []
# Single Use
if 'removeCategory' not in st.session_state:
    st.session_state.removeCategory = None
if 'resetResult' not in st.session_state:
    # True while the file selector repopulates widgets (see updateMultiSelect).
    st.session_state.resetResult = False
# Specific to target file
if 'unusedSpeakers' not in st.session_state:
    st.session_state.unusedSpeakers = []
if 'file_names' not in st.session_state:
    st.session_state.file_names = []
if 'showSummary' not in st.session_state:
    st.session_state.showSummary = 'No'
# Page header, contact info, and the audio uploader.
#st.set_page_config(layout="wide")
st.title("Instructor Support Tool")
if not isGPU:
    st.warning("TOOL CURRENTLY USING CPU, ANALYSIS EXTREMELY SLOW")
st.write('If you would like to see a sample result generated from real classroom audio, use the sidebar on the left and press "Load Demo Example"')
st.write('Keep in mind that this is a very early draft of the tool. Please be patient with any bugs/errors, and email Connor Young at czyoung@ualr.edu if you need help using the tool!')
st.divider()
st.write("Would you like additional data, charts, or features? We would love to hear more from you [about our project!](https://forms.gle/A32CdfGYSZoMPyyX9)")
st.write("If you would like to learn more or work with us, please contact Dr. Mark Baillie at mtbaillie@ualr.edu")
uploaded_file_paths = st.file_uploader("Upload an audio of classroom activity to analyze", accept_multiple_files=True)
# Audio formats go through processFile; .txt/.rttm/.csv are pre-made annotations.
supported_file_types = ('.wav','.mp3','.mp4','.txt','.rttm','.csv')
viewChoices = ["Voice Categories","Custom Categories","Detailed Voice Categories","Voice Category Treemap","Speaker Timeline","Time per Speaker"]
valid_files = []
file_paths = []
currDF = None
# Uploads are persisted here so loaders can read from real filesystem paths.
temp_dir = tempfile.mkdtemp()
# Validate uploads, persist them to the temp dir, and pad every per-file
# session-state list so indices stay aligned with valid_files.
if uploaded_file_paths is not None:
    print("Found file paths")
    valid_files = []
    file_paths = []
    file_names = []
    for uploaded_file in uploaded_file_paths:
        if not uploaded_file.name.lower().endswith(supported_file_types):
            st.error('File must be of type: {}'.format(supported_file_types))
            uploaded_file = None
        else:
            print(f"Valid file: {uploaded_file.name}")
            if uploaded_file not in valid_files:
                # Write the upload to disk so downstream loaders get a path.
                path = os.path.join(temp_dir, uploaded_file.name)
                with open(path, "wb") as f:
                    f.write(uploaded_file.getvalue())
                valid_files.append(uploaded_file)
                file_paths.append(path)
    # Save valid file names
    if len(valid_files) > 0:
        file_names = [f.name for f in valid_files]
        # Pad each per-file list up to the current number of files.
        while (len(st.session_state.results) < len(valid_files)):
            st.session_state.results.append([])
        while (len(st.session_state.summaries) < len(valid_files)):
            st.session_state.summaries.append([])
        while (len(st.session_state.unusedSpeakers) < len(valid_files)):
            st.session_state.unusedSpeakers.append([])
        while (len(st.session_state.categorySelect) < len(valid_files)):
            tempCategories = [[] for cat in st.session_state.categories]
            st.session_state.categorySelect.append(tempCategories)
        # (A duplicated, dead `summaries` padding loop was removed here — its
        # condition is already false after the loop above.)
        st.session_state.file_names = file_names
# Restore the persisted names so reruns without new uploads keep their files.
file_names = st.session_state.file_names
# "Analyze All New Audio": for each file without cached results, either parse a
# pre-made annotation file (.txt/.rttm/.csv) or run the full diarization
# pipeline, then build its summary DataFrames via analyze().
if len(file_names) == 0:
    st.text("Upload file(s) to enable analysis")
else:
    if st.button("Analyze All New Audio",key=f"button_all"):
        if len(valid_files) == 0:
            st.error('Upload file(s) first!')
        else:
            print("Start analyzing")
            start_time = time.time()
            totalFiles = len(valid_files)
            # suffix -> (loader, spinner label). All three loaders return
            # (speakerList, Annotation); the original had three copy-pasted
            # branches differing only in these two values.
            annotationLoaders = {
                '.txt': (su.loadAudioTXT, 'Loading Demo File'),
                '.rttm': (su.loadAudioRTTM, 'Loading File'),
                '.csv': (su.loadAudioCSV, 'Loading File'),
            }
            for i in range(totalFiles):
                # Skip files that already have results from a previous run.
                if len(st.session_state.results) > i and len(st.session_state.results[i]) > 0:
                    continue
                suffix = os.path.splitext(file_paths[i].lower())[1]
                if suffix in annotationLoaders:
                    loader, label = annotationLoaders[suffix]
                    with st.spinner(text=f'{label} {i+1} of {totalFiles}'):
                        speakerList, annotations = loader(file_paths[i])
                        printV(annotations,4)
                        # Approximate total seconds as the latest segment end.
                        totalSeconds = 0
                        for segment in annotations.itersegments():
                            if segment.end > totalSeconds:
                                totalSeconds = segment.end
                        st.session_state.results[i] = (annotations, totalSeconds)
                        st.session_state.summaries[i] = {}
                        speakerNames = annotations.labels()
                        st.session_state.unusedSpeakers[i] = speakerNames
                else:
                    # Real audio: run the full denoise/equalize/diarize pipeline.
                    with st.spinner(text=f'Processing File {i+1} of {totalFiles}'):
                        annotations, totalSeconds = processFile(file_paths[i])
                        print(f"Finished processing {file_paths[i]}")
                        st.session_state.results[i] = (annotations, totalSeconds)
                        print("Results saved")
                        st.session_state.summaries[i] = {}
                        print("Summaries saved")
                        speakerNames = annotations.labels()
                        st.session_state.unusedSpeakers[i] = speakerNames
                        print("Speakers saved")
                with st.spinner(text=f'Analyzing File {i+1} of {totalFiles}'):
                    analyze(file_names[i])
                    print(f"Finished analyzing {file_paths[i]}")
            print(f"Took {time.time() - start_time} seconds to analyze {totalFiles} files!")
            st.success(f"Took {time.time() - start_time} seconds to analyze {totalFiles} files!")
class FakeUpload:
    """Minimal stand-in for a Streamlit UploadedFile: exposes .path and .name
    so the bundled demo file can flow through the same code path as uploads."""
    def __init__(self, filepath):
        self.path = filepath
        # Path(...).name handles '/' and OS-native separators alike, unlike the
        # original filepath.split('/')[-1], which broke on Windows paths.
        self.name = Path(filepath).name
# "Load Demo Example": treat the bundled sample.rttm as a one-file upload,
# load its annotation, and run the same analysis as real uploads.
demoPath = "sample.rttm"
isDemo = False
if st.sidebar.button("Load Demo Example"):
    sampleUpload = FakeUpload(demoPath)
    valid_files=[sampleUpload]
    file_paths=[sampleUpload.path]
    file_names=[sampleUpload.name]
    start_time = time.time()
    st.session_state.file_names = file_names
    # Save valid file names
    if len(valid_files) > 0:
        file_names = [f.name for f in valid_files]
        # Pad per-file session lists (same padding as the upload path above).
        while (len(st.session_state.results) < len(valid_files)):
            st.session_state.results.append([])
        while (len(st.session_state.summaries) < len(valid_files)):
            st.session_state.summaries.append([])
        while (len(st.session_state.unusedSpeakers) < len(valid_files)):
            st.session_state.unusedSpeakers.append([])
        while (len(st.session_state.categorySelect) < len(valid_files)):
            tempCategories = [[] for cat in st.session_state.categories]
            st.session_state.categorySelect.append(tempCategories)
        # NOTE(review): duplicated padding loop — condition is already false
        # after the identical loop above; dead code.
        while (len(st.session_state.summaries) < len(valid_files)):
            st.session_state.summaries.append([])
    with st.spinner(text=f'Loading Demo Sample'):
        # RTTM load as filler
        speakerList, annotations = su.loadAudioRTTM(file_paths[0])
        # Approximate total seconds as the latest segment end.
        totalSeconds = 0
        for segment in annotations.itersegments():
            if segment.end > totalSeconds:
                totalSeconds = segment.end
        # NOTE(review): this overwrites ALL previous results/summaries with the
        # single demo entry, while file lists elsewhere may still be longer.
        st.session_state.results = [(annotations, totalSeconds)]
        st.session_state.summaries = [{}]
        speakerNames = annotations.labels()
        st.session_state.unusedSpeakers = [speakerNames]
    with st.spinner(text=f'Analyzing Demo Data'):
        analyze(file_names[0])
    st.success(f"Took {time.time() - start_time} seconds to analyze the demo file!")
    # Pre-select the demo file in the sidebar file selector.
    st.session_state.select_currFile=file_names[0]
    isDemo = True
# Sidebar file selector; switching files triggers updateMultiSelect so the
# category widgets repopulate from the chosen file's stored selections.
currFile = st.sidebar.selectbox('Current File', file_names,on_change=updateMultiSelect,key="select_currFile")
if isDemo:
    # The demo was just loaded this rerun: force-select it.
    currFile=file_names[0]
    isDemo = False
if currFile is None and len(st.session_state.results) > 0 and len(st.session_state.results[0]) > 0:
    st.write("Select a file to view from the sidebar")
# Render the analysis tabs for the selected file. file_names.index(None)
# raises ValueError, which the except at the end of this try swallows.
try:
    st.session_state.resetResult = False
    currFileIndex = file_names.index(currFile)
    # File name without extension, used in every download file name below.
    currPlainName = currFile.split('.')[0]
    if len(st.session_state.results) > currFileIndex and len(st.session_state.summaries) > currFileIndex and len(st.session_state.results[currFileIndex]) > 0:
        st.header(f"Analysis of file {currFile}")
        graphNames = ["Data","Voice Categories","Speaker Percentage","Speakers with Categories","Treemap","Timeline","Time Spoken"]
        dataTab, pie1, pie2, sunburst1, treemap1, timeline, bar1 = st.tabs(graphNames)
        # Handle
        # Load the selected file's annotation and cached frames.
        currAnnotation, currTotalTime = st.session_state.results[currFileIndex]
        speakerNames = currAnnotation.labels()
        speakers_dataFrame = st.session_state.summaries[currFileIndex]["speakers_dataFrame"]
        currDF, _ = su.annotationToSimpleDataFrame(currAnnotation)
        speakers_times = st.session_state.summaries[currFileIndex]["speakers_times"]
        # Update other categories
        unusedSpeakers = st.session_state.unusedSpeakers[currFileIndex]
        categorySelections = st.session_state["categorySelect"][currFileIndex]
        # One multiselect + remove button per custom category in the sidebar.
        for i,category in enumerate(st.session_state.categories):
            speakerSet = categorySelections[i]
            st.sidebar.multiselect(category,
                speakerSet+unusedSpeakers,
                default=speakerSet,
                key=f"multiselect_{category}",
                on_change=updateCategoryOptions,
                args=(currFileIndex,))
            st.sidebar.button(f"Remove {category}",key=f"remove_{category}",on_click=removeCategory,args=(i,))
        newCategory = st.sidebar.text_input('Add category', key='categoryInput',on_change=addCategory)
        # Fixed palettes: 3 voice-category colors; speakers then categories.
        catTypeColors = su.colorsCSS(3)
        allColors = su.colorsCSS(len(speakerNames)+len(st.session_state.categories))
        speakerColors = allColors[:len(speakerNames)]
        catColors = allColors[len(speakerNames):]
        # Recompute df4 (time per category + per uncategorized speaker) from
        # the live sidebar selections so the charts reflect edits immediately.
        df4_dict = {}
        nameList = st.session_state.categories
        extraNames = []
        valueList = [0 for i in range(len(nameList))]
        extraValues = []
        for i,speakerSet in enumerate(categorySelections):
            valueList[i] += su.sumTimes(currAnnotation.subset(speakerSet))
        for sp in unusedSpeakers:
            extraNames.append(sp)
            extraValues.append(su.sumTimes(currAnnotation.subset([sp])))
        df4_dict = {
            "names": nameList+extraNames,
            "values": valueList+extraValues,
        }
        df4 = pd.DataFrame(data=df4_dict)
        df4.name = "df4"
        st.session_state.summaries[currFileIndex]["df4"] = df4
| with dataTab: | |
| csv = convert_df(currDF) | |
| st.download_button( | |
| "Press to Download analysis data", | |
| csv, | |
| 'sonogram-analysis-'+currPlainName+'.csv', | |
| "text/csv", | |
| key='download-csv', | |
| on_click="ignore", | |
| ) | |
| st.dataframe(currDF) | |
        with pie1:
            # Pie of no-voice / one-voice / multi-voice time (df3 from analyze()).
            printV("In Pie1",4)
            df3 = st.session_state.summaries[currFileIndex]["df3"]
            fig1 = go.Figure()
            fig1.update_layout(
                title_text="Percentage of each Voice Category",
                colorway=catTypeColors,
                plot_bgcolor='rgba(0, 0, 0, 0)',
                paper_bgcolor='rgba(0, 0, 0, 0)',
            )
            printV("Pie1 Pretrace",4)
            # sort=False keeps slice order aligned with the fixed colorway.
            fig1.add_trace(go.Pie(values=df3["values"],labels=df3["names"],sort=False))
            printV("Pie1 Posttrace",4)
            col1_1, col1_2 = st.columns(2)
            # Static exports feeding the two download buttons below.
            fig1.write_image("ascn_pie1.pdf")
            fig1.write_image("ascn_pie1.svg")
            printV("Pie1 files written",4)
            with col1_1:
                printV("Pie1 in col1_1",4)
                with open('ascn_pie1.pdf','rb') as f:
                    printV("Pie1 in file open",4)
                    st.download_button(
                        "Save As PDF",
                        f,
                        'sonogram-voice-category-'+currPlainName+'.pdf',
                        'application/pdf',
                        key='download-pdf1',
                        on_click="ignore",
                    )
                printV("Pie1 after col1_1",4)
            with col1_2:
                with open('ascn_pie1.svg','rb') as f:
                    st.download_button(
                        "Save As SVG",
                        f,
                        'sonogram-voice-category-'+currPlainName+'.svg',
                        'image/svg+xml',
                        key='download-svg1',
                        on_click="ignore",
                    )
                printV("Pie1 in col1_2",4)
            st.plotly_chart(fig1, use_container_width=True,config=config)
            printV("Pie1 post plotly",4)
        with pie2:
            # Pie of per-category and per-speaker share (df4 recomputed above).
            df4 = st.session_state.summaries[currFileIndex]["df4"]
            # Some speakers may be missing, so fix colors
            # (only uncategorized names appear in df4 beyond the categories, so
            # map each back to its stable per-speaker color).
            figColors = []
            for n in df4["names"]:
                if n in speakerNames:
                    figColors.append(speakerColors[speakerNames.index(n)])
            fig2 = go.Figure()
            fig2.update_layout(
                title_text="Percentage of Speakers and Custom Categories",
                colorway=catColors+figColors,
                plot_bgcolor='rgba(0, 0, 0, 0)',
                paper_bgcolor='rgba(0, 0, 0, 0)',
            )
            fig2.add_trace(go.Pie(values=df4["values"],labels=df4["names"],sort=False))
            col2_1, col2_2 = st.columns(2)
            fig2.write_image("ascn_pie2.pdf")
            fig2.write_image("ascn_pie2.svg")
            with col2_1:
                with open('ascn_pie2.pdf','rb') as f:
                    st.download_button(
                        "Save As PDF",
                        f,
                        'sonogram-speaker-percent-'+currPlainName+'.pdf',
                        'application/pdf',
                        key='download-pdf2',
                        on_click="ignore",
                    )
            with col2_2:
                with open('ascn_pie2.svg','rb') as f:
                    st.download_button(
                        "Save As SVG",
                        f,
                        'sonogram-speaker-percent-'+currPlainName+'.svg',
                        'image/svg+xml',
                        key='download-svg2',
                        on_click="ignore",
                    )
            st.plotly_chart(fig2, use_container_width=True,config=config)
        with sunburst1:
            # Sunburst of the df5 hierarchy: voice categories -> speakers.
            df5 = st.session_state.summaries[currFileIndex]["df5"]
            fig3_1 = px.sunburst(df5,
                branchvalues = 'total',
                names = "labels",
                ids = "ids",
                parents = "parents",
                values = "percentiles",
                custom_data=['labels','valueStrings','percentiles','parentNames','parentPercentiles'],
                color = 'labels',
                title="Percentage of each Voice Category with Speakers",
                color_discrete_sequence=catTypeColors+speakerColors,
            )
            # Rich hover: label, duration string, share of total/parent.
            fig3_1.update_traces(
                hovertemplate="<br>".join([
                    '<b>%{customdata[0]}</b>',
                    'Duration: %{customdata[1]}s',
                    'Percentage of Total: %{customdata[2]:.2f}%',
                    'Parent: %{customdata[3]}',
                    'Percentage of Parent: %{customdata[4]:.2f}%'
                ])
            )
            fig3_1.update_layout(
                plot_bgcolor='rgba(0, 0, 0, 0)',
                paper_bgcolor='rgba(0, 0, 0, 0)',
            )
            col3_1, col3_2 = st.columns(2)
            fig3_1.write_image("ascn_sunburst.pdf")
            fig3_1.write_image("ascn_sunburst.svg")
            with col3_1:
                with open('ascn_sunburst.pdf','rb') as f:
                    st.download_button(
                        "Save As PDF",
                        f,
                        'sonogram-speaker-categories-'+currPlainName+'.pdf',
                        'application/pdf',
                        key='download-pdf3',
                        on_click="ignore",
                    )
            with col3_2:
                with open('ascn_sunburst.svg','rb') as f:
                    st.download_button(
                        "Save As SVG",
                        f,
                        'sonogram-speaker-categories-'+currPlainName+'.svg',
                        'image/svg+xml',
                        key='download-svg3',
                        on_click="ignore",
                    )
            st.plotly_chart(fig3_1, use_container_width=True,config=config)
        with treemap1:
            # Treemap view of the same df5 hierarchy as the sunburst tab.
            df5 = st.session_state.summaries[currFileIndex]["df5"]
            fig3 = px.treemap(df5,
                branchvalues = "total",
                names = "labels",
                parents = "parents",
                ids="ids",
                values = "percentiles",
                custom_data=['labels','valueStrings','percentiles','parentNames','parentPercentiles'],
                color='labels',
                title="Division of Speakers in each Voice Category",
                color_discrete_sequence=catTypeColors+speakerColors,
            )
            fig3.update_traces(
                hovertemplate="<br>".join([
                    '<b>%{customdata[0]}</b>',
                    'Duration: %{customdata[1]}s',
                    'Percentage of Total: %{customdata[2]:.2f}%',
                    'Parent: %{customdata[3]}',
                    'Percentage of Parent: %{customdata[4]:.2f}%'
                ])
            )
            fig3.update_layout(
                plot_bgcolor='rgba(0, 0, 0, 0)',
                paper_bgcolor='rgba(0, 0, 0, 0)',
            )
            col4_1, col4_2 = st.columns(2)
            fig3.write_image("ascn_treemap.pdf")
            fig3.write_image("ascn_treemap.svg")
            with col4_1:
                with open('ascn_treemap.pdf','rb') as f:
                    st.download_button(
                        "Save As PDF",
                        f,
                        'sonogram-treemap-'+currPlainName+'.pdf',
                        'application/pdf',
                        key='download-pdf4',
                        on_click="ignore",
                    )
            with col4_2:
                with open('ascn_treemap.svg','rb') as f:
                    st.download_button(
                        "Save As SVG",
                        f,
                        'sonogram-treemap-'+currPlainName+'.svg',
                        'image/svg+xml',
                        key='download-svg4',
                        on_click="ignore",
                    )
            st.plotly_chart(fig3, use_container_width=True,config=config)
        # generate plotting window
        with timeline:
            # Gantt-style timeline of speaker segments (speakers_dataFrame has
            # Start/Finish/Resource columns from su.annotationToDataFrame).
            fig_la = px.timeline(speakers_dataFrame, x_start="Start", x_end="Finish", y="Resource", color="Resource",title="Timeline of Audio with Speakers",
                color_discrete_sequence=speakerColors)
            fig_la.update_yaxes(autorange="reversed")
            # Convert the total duration to an end-of-axis clock time.
            hMax = int(currTotalTime//3600)
            mMax = int(currTotalTime%3600//60)
            sMax = int(currTotalTime%60)
            msMax = int(currTotalTime*1000000%1000000)
            timeMax = dt.time(hMax,mMax,sMax,msMax)
            fig_la.update_layout(
                # Show milliseconds only when zoomed in below 1s tick spacing.
                xaxis_tickformatstops = [
                    dict(dtickrange=[None, 1000], value="%H:%M:%S.%L"),
                    dict(dtickrange=[1000, None], value="%H:%M:%S")
                ],
                xaxis=dict(
                    range=[dt.datetime.combine(dt.date.today(), dt.time.min),dt.datetime.combine(dt.date.today(), timeMax)]
                ),
                xaxis_title="Time",
                yaxis_title="Speaker",
                legend_title=None,
                plot_bgcolor='rgba(0, 0, 0, 0)',
                paper_bgcolor='rgba(0, 0, 0, 0)',
                legend={'traceorder':'reversed'},
                yaxis= {'showticklabels': False},
            )
            col5_1, col5_2 = st.columns(2)
            fig_la.write_image("ascn_timeline.pdf")
            fig_la.write_image("ascn_timeline.svg")
            with col5_1:
                with open('ascn_timeline.pdf','rb') as f:
                    st.download_button(
                        "Save As PDF",
                        f,
                        'sonogram-timeline-'+currPlainName+'.pdf',
                        'application/pdf',
                        key='download-pdf5',
                        on_click="ignore",
                    )
            with col5_2:
                with open('ascn_timeline.svg','rb') as f:
                    st.download_button(
                        "Save As SVG",
                        f,
                        'sonogram-timeline-'+currPlainName+'.svg',
                        'image/svg+xml',
                        key='download-svg5',
                        on_click="ignore",
                    )
            st.plotly_chart(fig_la, use_container_width=True,config=config)
with bar1:
    # Horizontal bar chart: percentage of total time attributed to each
    # speaker/category for the currently selected file.
    df2 = st.session_state.summaries[currFileIndex]["df2"]
    fig2_la = px.bar(df2, x="values", y="names", color="names", orientation='h',
        custom_data=["names","values"],title="Time Spoken by each Speaker",
        color_discrete_sequence=catColors+speakerColors)
    fig2_la.update_xaxes(ticksuffix="%")
    # Reverse so the first speaker appears at the top of the chart.
    fig2_la.update_yaxes(autorange="reversed")
    fig2_la.update_layout(
        xaxis_title="Percentage Time Spoken",
        yaxis_title="Speaker",
        legend_title=None,
        plot_bgcolor='rgba(0, 0, 0, 0)',
        paper_bgcolor='rgba(0, 0, 0, 0)',
        legend={'traceorder':'reversed'},
        yaxis= {'showticklabels': False},
    )
    fig2_la.update_traces(
        hovertemplate="<br>".join([
            '<b>%{customdata[0]}</b>',
            'Percentage of Time: %{customdata[1]:.2f}%'
        ])
    )
    col6_1, col6_2 = st.columns(2)
    # BUG FIX: these two exports previously wrote fig_la (the timeline
    # figure), so the "Save As" buttons in this tab served the wrong chart.
    # Export the bar chart (fig2_la) instead.
    fig2_la.write_image("ascn_bar.pdf")
    fig2_la.write_image("ascn_bar.svg")
    with col6_1:
        with open('ascn_bar.pdf','rb') as f:
            st.download_button(
                "Save As PDF",
                f,
                'sonogram-speaker-time-'+currPlainName+'.pdf',
                'application/pdf',
                key='download-pdf6',
                on_click="ignore",
            )
    with col6_2:
        with open('ascn_bar.svg','rb') as f:
            st.download_button(
                "Save As SVG",
                f,
                'sonogram-speaker-time-'+currPlainName+'.svg',
                'image/svg+xml',
                key='download-svg6',
                on_click="ignore",
            )
    # Interactive version rendered inline.
    st.plotly_chart(fig2_la, use_container_width=True,config=config)
# NOTE(review): silently swallows any ValueError raised in the try block
# that begins earlier in the file (presumably from figure/DataFrame
# construction on missing or malformed per-file results) — confirm this
# best-effort behavior is intentional and not masking real bugs.
except ValueError:
    pass
# Cross-file summary section: compares category/voice statistics across all
# analyzed files. Only meaningful when at least two files produced results.
if len(st.session_state.results) > 0:
    with st.expander("Multi-file Summary Data"):
        st.header("Multi-file Summary Data")
        with st.spinner(text='Processing summary results...'):
            fileNames = st.session_state.file_names
            # Collect the indices of successfully analyzed files; a valid
            # result is a 2-tuple (annotation, totalTime).
            results = []
            indices = []
            for i, resultTuple in enumerate(st.session_state.results):
                if len(resultTuple) == 2:
                    results.append(resultTuple)
                    indices.append(i)
            if len(indices) > 1:
                # df6: one row per file, one column per speaker category,
                # values are fractions of that file's total time.
                # NOTE(review): "files" holds *all* file names while the
                # category columns below are filled only for `indices`; if
                # any result tuple is invalid the column lengths diverge and
                # pd.DataFrame raises ValueError — confirm whether that case
                # can occur and is handled upstream.
                df6_dict = {
                    "files":fileNames,
                }
                allCategories = copy.deepcopy(st.session_state.categories)
                # First pass: compute per-file category summaries and grow
                # the global category list with any extras encountered.
                for i in indices:
                    currAnnotation, currTotalTime = st.session_state.results[i]
                    categorySelections = st.session_state["categorySelect"][i]
                    catSummary,extraCats = su.calcCategories(currAnnotation,categorySelections)
                    st.session_state.summaries[i]["categories"] = (catSummary,extraCats)
                    for extra in extraCats:
                        df6_dict[extra] = []
                        if extra not in allCategories:
                            allCategories.append(extra)
                for category in st.session_state.categories:
                    df6_dict[category] = []
                # Second pass: fill each file's row — fraction of total time
                # per category, 0 for categories the file has no data for.
                for i in indices:
                    summary, extras = st.session_state.summaries[i]["categories"]
                    theseCategories = st.session_state.categories + extras
                    for j, timeSlots in enumerate(summary):
                        df6_dict[theseCategories[j]].append(sum([t.duration for _,t in timeSlots])/st.session_state.results[i][1])
                    for category in allCategories:
                        if category not in theseCategories:
                            df6_dict[category].append(0)
                df6 = pd.DataFrame(df6_dict)
                summFig = px.bar(df6, x="files", y=allCategories,title="Time Spoken by Each Speaker in Each File")
                st.plotly_chart(summFig, use_container_width=True,config=config)
                # df7: per-file voice-activity percentages taken from each
                # file's precomputed "df5" summary.
                voiceNames = ["No Voice","One Voice","Multi Voice"]
                df7_dict = {
                    "files":fileNames,
                }
                for category in voiceNames:
                    df7_dict[category] = []
                # NOTE(review): iterates *all* summaries (not `indices`) and
                # `resultID` is unused — confirm every summary entry always
                # carries a "df5" key, otherwise this raises KeyError.
                for resultID,summary in enumerate(st.session_state.summaries):
                    partialDf = summary["df5"]
                    for i in range(len(voiceNames)):
                        df7_dict[voiceNames[i]].append(partialDf["percentiles"][i])
                df7 = pd.DataFrame(df7_dict)
                # Three views of the same stacked-bar data, each sorted by a
                # different voice category for easier visual comparison.
                sorted_df7 = df7.sort_values(by=['One Voice', 'Multi Voice'])
                summFig2 = px.bar(sorted_df7, x="files", y=["One Voice","Multi Voice","No Voice",],title="Cross-file Voice Categories sorted for One Voice")
                st.plotly_chart(summFig2, use_container_width=True,config=config)
                sorted_df7_3 = df7.sort_values(by=['Multi Voice','One Voice'])
                summFig3 = px.bar(sorted_df7_3, x="files", y=["One Voice","Multi Voice","No Voice",],title="Cross-file Voice Categories sorted for Multi Voice")
                st.plotly_chart(summFig3, use_container_width=True,config=config)
                sorted_df7_4 = df7.sort_values(by=['No Voice', 'Multi Voice'],ascending=False)
                summFig4 = px.bar(sorted_df7_4, x="files", y=["One Voice","Multi Voice","No Voice",],title="Cross-file Voice Categories sorted for Any Voice")
                st.plotly_chart(summFig4, use_container_width=True,config=config)
# Dead code preserved as a string, never executed or read: a disabled UI
# for uploading analyzed audio to the project dataset via save_data().
# TODO(review): delete once the data-donation feature is (re)implemented.
old = '''userid = st.text_input("user id:", "Guest")
colorPref = st.text_input("Favorite color?", "None")
radio = st.radio('Pick one:', ['Left','Right'])
selection = st.selectbox('Select', [1,2,3])
if st.button("Upload Files to Dataset"):
save_data({"color":colorPref,"direction":radio,"number":selection},
file_paths,
userid)
st.success('I think it worked!')
'''
def convert_df(df):
    """Serialize *df* to UTF-8 encoded CSV bytes, omitting the index.

    NOTE: this is a re-definition of the identical helper declared near
    the top of this file; it harmlessly shadows the earlier one.
    """
    csv_text = df.to_csv(index=False)
    return csv_text.encode('utf-8')
# Static help content: usage instructions and anticipated questions.
with st.expander("Instructions and additional details"):
    st.write("Thank you for viewing our experimental app! The overall presentations and features are expected to be improved over time, you can think of this as our first rough draft!")
    st.write("To use this app:\n1. Upload an audio file for live analysis. Alternatively, you can upload an already generated [rttm file](https://stackoverflow.com/questions/30975084/rttm-file-format)")
    st.write("2. Press Analyze All. Note that no data is saved on our side, so we will not have access to your recordings. Future versions of this app will support donating audio to us for aid in our research.")
    st.write("3. Use the side bar on the left to select your file (may have to be expanded by clicking the > ). Our app supports uploading multiple files for more comprehensive analysis.")
    st.write("4. Use the tabs provided to view different visualizations of your audio. Each example can be downloaded for personal use.")
    st.write("4a. The graphs are built using [plotly](https://plotly.com/). This allows for a high degree of interaction. Feel free to experiment with the graphs, as you can always return to the original view by double-clicking on the graph. For more examples of easily supported visualizations, see [here](https://plotly.com/python/basic-charts/)")
with st.expander("(Potentially) FAQ"):
    # The question strings below had pointless f-prefixes (no {} placeholders,
    # ruff F541); they were dropped — the rendered text is byte-identical.
    st.write("**1. I tried analyzing a file, but the page refreshed and nothing happened! Why?**\n\t")
    st.write("You may need to select a file using the side bar on the left. This app supports multiple files, so we require that you select which file to view after analysis.")
    st.write("**2. I don't see a sidebar! Where is it?**\n\t")
    st.write("The side bar may start by being minimized. Press the '>' in the upper left to expand the side bar.")
    st.write("**3. I still don't have a file to select in the dropdown! Why?**\n\t")
    st.write("If you are sure that you have run Analyze All and after refresh no files may be selected, then your file is likely too large. We currently have a limitation of approximately 1.5 hours of audio. This is a known issue that requires additional time **or** money to solve, and is expected to be fixed by the next update of this app. Please be patient!")
    st.write("**4. I want to be able to view my previously analyzed data! How can I do this?**\n\t")
    st.write("You can download a CSV copy of the data using the first tab. From there, you can reupload the CSV copy at a later date to view the data visualizations without having to use your original audio file. Future versions of this app will support creating optional logins for long term storage and analysis.")
    st.write("**5. The app says 'TOOL CURRENTLY USING CPU, ANALYSIS EXTREMELY SLOW' and takes forever to analyze audio! What is wrong?**\n\t")
    st.write("We are currently in the process of securing funding to allow permanent public access to this tool. Until then, we can provide an interface to view already analyzed data without cost to you or us. While this mode will technically still work, it may take over a day to analyze your audio. Feel free to reach out to us to discuss temporary solutions to this until the app's funding is secured!")