mistermprah committed on
Commit
08fbee0
·
verified ·
1 Parent(s): b10f2fc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +62 -13
app.py CHANGED
@@ -1,6 +1,8 @@
1
  import streamlit as st
2
  from transformers import pipeline
3
  import torchaudio
 
 
4
  from config import MODEL_ID
5
 
6
  # Load the model and pipeline using the model_id variable
@@ -35,17 +37,17 @@ if theme == "Light Green":
35
  """
36
  <style>
37
  body, .stApp {
38
- background-color: #e8f5e9; /* Light green background */
39
  }
40
  .stApp {
41
- color: #004d40; /* Dark green text */
42
  }
43
  .stButton > button, .stFileUpload > div {
44
- background-color: #004d40; /* Dark green button and file uploader background */
45
- color: white; /* White text */
46
  }
47
  .stButton > button:hover, .stFileUpload > div:hover {
48
- background-color: #00332c; /* Darker green on hover */
49
  }
50
  </style>
51
  """,
@@ -56,24 +58,23 @@ elif theme == "Light Blue":
56
  """
57
  <style>
58
  body, .stApp {
59
- background-color: #e0f7fa; /* Light blue background */
60
  }
61
  .stApp {
62
- color: #006064; /* Dark blue text */
63
  }
64
  .stButton > button, .stFileUpload > div {
65
- background-color: #006064; /* Dark blue button and file uploader background */
66
- color: white; /* White text */
67
  }
68
  .stButton > button:hover, .stFileUpload > div:hover {
69
- background-color: #004d40; /* Darker blue on hover */
70
  }
71
  </style>
72
  """,
73
  unsafe_allow_html=True
74
  )
75
 
76
-
77
  # File uploader for audio files
78
  uploaded_file = st.file_uploader("Upload an audio file", type=["wav", "mp3"])
79
 
@@ -87,11 +88,34 @@ if uploaded_file is not None:
87
  with open("temp_audio_file.wav", "wb") as f:
88
  f.write(audio_bytes)
89
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
  # Classify the audio file
91
  st.write("Classifying the audio...")
92
  results = classify_audio("temp_audio_file.wav")
93
 
94
- # Display the classification results in a dedicated output box
95
  st.subheader("Classification Results")
96
  results_box = st.empty()
97
  results_str = "\n".join([f"{label}: {score:.2f}" for label, score in results.items()])
@@ -105,7 +129,32 @@ for example in examples:
105
  st.subheader(f"Sample Audio: {example}")
106
  audio_bytes = open(example, 'rb').read()
107
  st.audio(audio_bytes, format='audio/wav')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
  results = classify_audio(example)
109
  st.write("Results:")
110
  results_str = "\n".join([f"{label}: {score:.2f}" for label, score in results.items()])
111
- st.text(results_str)
 
1
  import streamlit as st
2
  from transformers import pipeline
3
  import torchaudio
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
  from config import MODEL_ID
7
 
8
  # Load the model and pipeline using the model_id variable
 
37
  """
38
  <style>
39
  body, .stApp {
40
+ background-color: #e8f5e9;
41
  }
42
  .stApp {
43
+ color: #004d40;
44
  }
45
  .stButton > button, .stFileUpload > div {
46
+ background-color: #004d40;
47
+ color: white;
48
  }
49
  .stButton > button:hover, .stFileUpload > div:hover {
50
+ background-color: #00332c;
51
  }
52
  </style>
53
  """,
 
58
  """
59
  <style>
60
  body, .stApp {
61
+ background-color: #e0f7fa;
62
  }
63
  .stApp {
64
+ color: #006064;
65
  }
66
  .stButton > button, .stFileUpload > div {
67
+ background-color: #006064;
68
+ color: white;
69
  }
70
  .stButton > button:hover, .stFileUpload > div:hover {
71
+ background-color: #004d40;
72
  }
73
  </style>
74
  """,
75
  unsafe_allow_html=True
76
  )
77
 
 
78
  # File uploader for audio files
79
  uploaded_file = st.file_uploader("Upload an audio file", type=["wav", "mp3"])
80
 
 
88
  with open("temp_audio_file.wav", "wb") as f:
89
  f.write(audio_bytes)
90
 
91
+ # Load audio for visualization
92
+ waveform, sample_rate = torchaudio.load("temp_audio_file.wav")
93
+
94
+ # Visualization selection
95
+ viz_type = st.radio("Select visualization type:", ["Waveform", "Spectrogram"])
96
+
97
+ # Create visualization
98
+ fig, ax = plt.subplots(figsize=(10, 4))
99
+ if viz_type == "Waveform":
100
+ time = np.arange(waveform.shape[1]) / sample_rate
101
+ ax.plot(time, waveform[0].numpy())
102
+ ax.set_title("Audio Waveform")
103
+ ax.set_xlabel("Time (s)")
104
+ ax.set_ylabel("Amplitude")
105
+ ax.set_xlim([0, time[-1]])
106
+ else:
107
+ ax.specgram(waveform[0].numpy(), Fs=sample_rate, cmap='viridis', NFFT=1024, noverlap=512)
108
+ ax.set_title("Spectrogram")
109
+ ax.set_xlabel("Time (s)")
110
+ ax.set_ylabel("Frequency (Hz)")
111
+
112
+ st.pyplot(fig)
113
+
114
  # Classify the audio file
115
  st.write("Classifying the audio...")
116
  results = classify_audio("temp_audio_file.wav")
117
 
118
+ # Display the classification results
119
  st.subheader("Classification Results")
120
  results_box = st.empty()
121
  results_str = "\n".join([f"{label}: {score:.2f}" for label, score in results.items()])
 
129
  st.subheader(f"Sample Audio: {example}")
130
  audio_bytes = open(example, 'rb').read()
131
  st.audio(audio_bytes, format='audio/wav')
132
+
133
+ # Load audio for visualization
134
+ waveform, sample_rate = torchaudio.load(example)
135
+
136
+ # Visualization selection
137
+ viz_type = st.radio("Select visualization type:", ["Waveform", "Spectrogram"], key=example)
138
+
139
+ # Create visualization
140
+ fig, ax = plt.subplots(figsize=(10, 4))
141
+ if viz_type == "Waveform":
142
+ time = np.arange(waveform.shape[1]) / sample_rate
143
+ ax.plot(time, waveform[0].numpy())
144
+ ax.set_title("Audio Waveform")
145
+ ax.set_xlabel("Time (s)")
146
+ ax.set_ylabel("Amplitude")
147
+ ax.set_xlim([0, time[-1]])
148
+ else:
149
+ ax.specgram(waveform[0].numpy(), Fs=sample_rate, cmap='viridis', NFFT=1024, noverlap=512)
150
+ ax.set_title("Spectrogram")
151
+ ax.set_xlabel("Time (s)")
152
+ ax.set_ylabel("Frequency (Hz)")
153
+
154
+ st.pyplot(fig)
155
+
156
+ # Classification results
157
  results = classify_audio(example)
158
  st.write("Results:")
159
  results_str = "\n".join([f"{label}: {score:.2f}" for label, score in results.items()])
160
+ st.text(results_str)