Update src/streamlit_app.py
Browse files- src/streamlit_app.py +15 -14
src/streamlit_app.py
CHANGED
|
@@ -129,26 +129,27 @@ if st.button("Analyze"):
|
|
| 129 |
st.write("Audio saved at:", audio_path)
|
| 130 |
st.write("Exists:", os.path.exists(audio_path))
|
| 131 |
|
| 132 |
-
with st.spinner("Transcribing with Whisper..."):
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
|
| 139 |
with st.spinner("Classifying accent..."):
|
| 140 |
-
|
| 141 |
-
# pipe = pipeline('audio-classification', model=model_name, device=-1) # GPU (device=0) or CPU (device=-1)
|
| 142 |
-
# accent_data = accent_classify(pipe, audio_path)
|
| 143 |
-
audio_df = split_audio(audio_path)
|
| 144 |
waves = f"{np.concatenate(audio_df['audio'][:5].to_list())}"
|
| 145 |
st.markdown("**Audio waves:**")
|
| 146 |
st.text_area("Audio waves", waves, height=200)
|
| 147 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 148 |
# audio_df = split_audio(audio_path)
|
| 149 |
# print(np.concatenate(audio_df["audio"][:50].to_list()))
|
| 150 |
|
| 151 |
-
accent_data = {"label": "American", "score": 0.9}
|
| 152 |
accent = accent_data.get("label", "American")
|
| 153 |
confidence = accent_data.get("score", 0.0)
|
| 154 |
# pass
|
|
@@ -156,8 +157,8 @@ if st.button("Analyze"):
|
|
| 156 |
st.success("Analysis Complete!")
|
| 157 |
st.markdown(f"**Accent:** {accent}")
|
| 158 |
st.markdown(f"**Confidence Score:** {confidence:.2f}%")
|
| 159 |
-
st.markdown("**Transcription:**")
|
| 160 |
-
st.text_area("Transcript", transcription, height=200)
|
| 161 |
|
| 162 |
# Cleanup
|
| 163 |
os.remove(video_path)
|
|
|
|
| 129 |
st.write("Audio saved at:", audio_path)
|
| 130 |
st.write("Exists:", os.path.exists(audio_path))
|
| 131 |
|
| 132 |
+
# with st.spinner("Transcribing with Whisper..."):
|
| 133 |
+
# whisper_model = whisper.load_model("base")
|
| 134 |
+
# result = whisper_model.transcribe(audio_path)
|
| 135 |
+
# transcription = result['text']
|
| 136 |
+
# transcription = "Hello There"
|
| 137 |
+
# pass
|
| 138 |
|
| 139 |
with st.spinner("Classifying accent..."):
|
| 140 |
+
|
|
|
|
|
|
|
|
|
|
| 141 |
waves = f"{np.concatenate(audio_df['audio'][:5].to_list())}"
|
| 142 |
st.markdown("**Audio waves:**")
|
| 143 |
st.text_area("Audio waves", waves, height=200)
|
| 144 |
+
|
| 145 |
+
model_name = "dima806/english_accents_classification"
|
| 146 |
+
pipe = pipeline('audio-classification', model=model_name, device=0) # GPU (device=0) or CPU (device=-1)
|
| 147 |
+
accent_data = accent_classify(pipe, audio_path)
|
| 148 |
+
audio_df = split_audio(audio_path)
|
| 149 |
# audio_df = split_audio(audio_path)
|
| 150 |
# print(np.concatenate(audio_df["audio"][:50].to_list()))
|
| 151 |
|
| 152 |
+
# accent_data = {"label": "American", "score": 0.9}
|
| 153 |
accent = accent_data.get("label", "American")
|
| 154 |
confidence = accent_data.get("score", 0.0)
|
| 155 |
# pass
|
|
|
|
| 157 |
st.success("Analysis Complete!")
|
| 158 |
st.markdown(f"**Accent:** {accent}")
|
| 159 |
st.markdown(f"**Confidence Score:** {confidence:.2f}%")
|
| 160 |
+
# st.markdown("**Transcription:**")
|
| 161 |
+
# st.text_area("Transcript", transcription, height=200)
|
| 162 |
|
| 163 |
# Cleanup
|
| 164 |
os.remove(video_path)
|