MeysamSh committed on
Commit
7d9277c
·
1 Parent(s): faf6058
Files changed (2)
  1. app.py +200 -114
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,130 +1,216 @@
- # Hugging Face Spaces – Phone Vibration Sound Classifier
- # -------------------------------------------------
- # Features:
- # - Record 2x 60-second audio samples (Class A and Class B)
- # - Sliding window segmentation (20 ms)
- # - MFCC feature extraction
- # - Train / fine-tune a classifier
- # - Record a 3rd sample for testing and predict class
- #
- # Requirements (automatically handled by Spaces):
- # gradio, numpy, librosa, scikit-learn, soundfile
- #
- # Space type: Gradio
-
  import gradio as gr
  import numpy as np
  import librosa
- from sklearn.linear_model import LogisticRegression
  from sklearn.preprocessing import StandardScaler
  from sklearn.pipeline import Pipeline
- import tempfile
- import os

  SAMPLE_RATE = 16000
- WINDOW_MS = 20
  WINDOW_SAMPLES = int(SAMPLE_RATE * WINDOW_MS / 1000)
  N_MFCC = 13
-
- # Global model storage
- model_pipeline = None
- class_names = ["Class A", "Class B"]
-
-
- def audio_to_windows(y: np.ndarray, sr: int):
-     hop = WINDOW_SAMPLES
-     windows = []
-     for start in range(0, len(y) - WINDOW_SAMPLES, hop):
-         windows.append(y[start:start + WINDOW_SAMPLES])
-     return windows
-
-
- def extract_features(y: np.ndarray, sr: int):
-     windows = audio_to_windows(y, sr)
      feats = []
-     for w in windows:
-         mfcc = librosa.feature.mfcc(y=w, sr=sr, n_mfcc=N_MFCC)
-         mfcc_mean = mfcc.mean(axis=1)
-         feats.append(mfcc_mean)
-     return np.array(feats)
-
-
- def load_audio(file):
-     y, sr = librosa.load(file, sr=SAMPLE_RATE, mono=True)
-     return y, sr
-
-
- def train_model(audio_a, audio_b):
-     global model_pipeline
-
-     if audio_a is None or audio_b is None:
-         return "Please record both Class A and Class B samples."
-
-     y_a, sr_a = load_audio(audio_a)
-     y_b, sr_b = load_audio(audio_b)
-
-     X_a = extract_features(y_a, sr_a)
-     X_b = extract_features(y_b, sr_b)
-
-     y_labels = np.concatenate([
-         np.zeros(len(X_a)),
-         np.ones(len(X_b))
-     ])
-
-     X = np.vstack([X_a, X_b])
-
-     model_pipeline = Pipeline([
          ("scaler", StandardScaler()),
-         ("clf", LogisticRegression(max_iter=200))
      ])
-
-     model_pipeline.fit(X, y_labels)
-
-     return f"Model trained successfully. Windows used: {len(X)}"
-
-
- def predict(audio_test):
-     global model_pipeline
-
-     if model_pipeline is None:
-         return "Model not trained yet."
-
-     if audio_test is None:
-         return "Please record a test sample."
-
-     y, sr = load_audio(audio_test)
-     X_test = extract_features(y, sr)
-
-     probs = model_pipeline.predict_proba(X_test)
-     avg_prob = probs.mean(axis=0)
-
-     predicted_class = int(np.argmax(avg_prob))
-
-     return (
-         f"Prediction: {class_names[predicted_class]} (Confidence: {avg_prob[predicted_class]*100:.1f}%)"
-     )
-
-
- with gr.Blocks(title="Phone Vibration Sound Classifier") as demo:
-     gr.Markdown("# Phone Vibration Sound Classifier")
-     gr.Markdown("Record two 1-minute samples for two vibration sources, train the model, then test a third recording.")

      with gr.Row():
-         audio_a = gr.Audio(sources=["microphone"], type="filepath", label="Record Class A (60 seconds)")
-         audio_b = gr.Audio(sources=["microphone"], type="filepath", label="Record Class B (60 seconds)")
-
-     train_btn = gr.Button("Train / Fine-tune Model")
-     train_status = gr.Textbox(label="Training Status")
-
-     train_btn.click(train_model, inputs=[audio_a, audio_b], outputs=train_status)
-
-     gr.Markdown("---")
-
-     audio_test = gr.Audio(sources=["microphone"], type="filepath", label="Record Test Sample (up to 60 seconds)")
-     predict_btn = gr.Button("Predict Class")
-     prediction_output = gr.Textbox(label="Prediction Result")
-
-     predict_btn.click(predict, inputs=audio_test, outputs=prediction_output)


- demo.launch()
  import gradio as gr
  import numpy as np
  import librosa
+ import xgboost as xgb
+ import random
  from sklearn.preprocessing import StandardScaler
  from sklearn.pipeline import Pipeline
+ import difflib

+ # --- Constants ---
  SAMPLE_RATE = 16000
+ WINDOW_MS = 100
  WINDOW_SAMPLES = int(SAMPLE_RATE * WINDOW_MS / 1000)
  N_MFCC = 13
+ SILENCE_EMOJI = "_"
+ MIN_SEC = 3.0
+ MAX_SEC = 5.0
+
+ def generate_challenge():
+     length = random.randint(3, 5)
+     seq = []
+     for i in range(length):
+         seq.append(str(random.choice([0, 1])))
+         if i < length - 1:
+             seq.append(SILENCE_EMOJI)
+     # Return both the mission string and reset visibility to True
+     mission = " ".join(seq)
+     return mission, gr.update(visible=True, value=mission)
+
+ def hide_mission(audio_data):
+     """Hides the mission textbox once the referee has recorded audio."""
+     if audio_data is not None:
+         return gr.update(visible=False)
+     return gr.update(visible=True)
+
+
+ def post_process_to_emoji(preds, window_ms, min_silence_ms=200):
+     """Processes raw AI output, smooths it, enforces silence gaps, and merges duplicates."""
+     if len(preds) == 0: return ""
+
+     # With 50% window overlap, each prediction step advances half a window
+     ms_per_step = window_ms / 2
+     min_silence_steps = int(min_silence_ms / ms_per_step)
+
+     # 1. Majority Vote Smoothing (Temporal Filtering)
+     # Reduces "flicker" where a single window might jump to a wrong class
+     smoothed = []
+     for i in range(len(preds)):
+         start = max(0, i - 1)
+         end = min(len(preds), i + 2)
+         neighborhood = list(preds[start:end])
+         smoothed.append(max(set(neighborhood), key=neighborhood.count))
+
+     # 2. Silence Enforcement & Transition Logic
+     # We only allow a change of class if the silence buffer is respected
+     intermediate_sequence = []
+     last_val = -1
+     silence_count = 0
+
+     for p in smoothed:
+         p = int(p)
+         if p == 2:  # Silence class
+             silence_count += 1
+             if last_val != 2:
+                 intermediate_sequence.append(2)
+                 last_val = 2
+         else:  # Sound class (0 or 1)
+             if last_val != p:
+                 # If we were in silence, check that the gap was long enough
+                 if last_val == -1 or (last_val == 2 and silence_count >= min_silence_steps):
+                     intermediate_sequence.append(p)
+                     last_val = p
+                     silence_count = 0
+                 # A direct jump from 0 to 1 without enough silence is ignored
+
+     # 3. Final Merge (the "100110" -> "1010" logic)
+     # Drop silence markers and any accidental back-to-back duplicates
+     final_output = []
+     for val in intermediate_sequence:
+         if val != 2:
+             if not final_output or str(val) != final_output[-1]:
+                 final_output.append(str(val))
+
+     # Join the digits with the silence marker, e.g. ["1", "0"] -> "1_0"
+     return SILENCE_EMOJI.join(final_output)
+
+ def extract_features_sequence(audio_path, validate_duration=True):
+     if audio_path is None: return None, "No audio recorded."
+     y, sr = librosa.load(audio_path, sr=SAMPLE_RATE, mono=True)
+     if len(y) < WINDOW_SAMPLES:
+         return None, f"Audio too short ({len(y)/SAMPLE_RATE:.1f}s), needs to be at least {WINDOW_MS/1000:.1f}s."
+     elif validate_duration and len(y) > SAMPLE_RATE * 5:  # Limit to 5 seconds for performance
+         print(f"Audio too long ({len(y)/SAMPLE_RATE:.1f}s), truncating to 5s for feature extraction.")
+         y = y[:SAMPLE_RATE * 5]
+
+     hop = WINDOW_SAMPLES // 2  # 50% overlap for smoother sequence detection
      feats = []
+     for start in range(0, len(y) - WINDOW_SAMPLES, hop):
+         w = y[start:start + WINDOW_SAMPLES]
+         mfcc = librosa.feature.mfcc(y=w, sr=sr, n_mfcc=N_MFCC, n_fft=512)
+         feats.append(mfcc.mean(axis=1))
+     return np.array(feats), "OK"
+
+ def train_player_model(a0, a1, a_silence, player_name):
+     X0, msg0 = extract_features_sequence(a0, validate_duration=True)
+     X1, msg1 = extract_features_sequence(a1, validate_duration=True)
+     X_sil, msg_sil = extract_features_sequence(a_silence, validate_duration=True)
+
+     if X0 is None: return None, f"{player_name} Source 0: {msg0}"
+     if X1 is None: return None, f"{player_name} Source 1: {msg1}"
+     if X_sil is None: return None, f"{player_name} Silence: {msg_sil}"
+
+     X = np.vstack([X0, X1, X_sil])
+     y = np.concatenate([np.zeros(len(X0)), np.ones(len(X1)), np.full(len(X_sil), 2)])
+
+     print(f"{player_name} - Training model with {len(X)} samples: {len(X0)} Source 0, {len(X1)} Source 1, {len(X_sil)} Silence")
+
+     model = Pipeline([
          ("scaler", StandardScaler()),
+         ("clf", xgb.XGBClassifier(n_estimators=50, max_depth=3, objective='multi:softprob', num_class=3))
      ])
+     model.fit(X, y)
+     print(f"{player_name} model trained successfully with {len(X)} samples!")
+     return model, "OK"
+
+ def play_game(target_display, ref_audio, p1_0, p1_1, p1_s, p2_0, p2_1, p2_s):
+     # Validation and Training logic...
+     m1, err1 = train_player_model(p1_0, p1_1, p1_s, "Player 1")
+     if m1 is None: return f"### ❌ {err1}"
+
+     m2, err2 = train_player_model(p2_0, p2_1, p2_s, "Player 2")
+     if m2 is None: return f"### ❌ {err2}"
+
+     if not ref_audio: return "### ⚠️ Referee recording missing!"
+
+     X_ref, ref_msg = extract_features_sequence(ref_audio, validate_duration=False)
+     if X_ref is None: return f"### ⚠️ Referee: {ref_msg}"
+     target_numeric = target_display.replace(" ", "").replace(SILENCE_EMOJI, "2")
+
+     res1_emoji = post_process_to_emoji(m1.predict(X_ref), WINDOW_MS)
+     res2_emoji = post_process_to_emoji(m2.predict(X_ref), WINDOW_MS)
+
+     res1_num = res1_emoji.replace(SILENCE_EMOJI, "2")
+     res2_num = res2_emoji.replace(SILENCE_EMOJI, "2")
+
+     score1 = round(difflib.SequenceMatcher(None, target_numeric, res1_num).ratio() * 100, 1)
+     score2 = round(difflib.SequenceMatcher(None, target_numeric, res2_num).ratio() * 100, 1)
+
+     winner = "Player 1" if score1 > score2 else "Player 2"
+     if score1 == score2: winner = "It's a Tie!"
+
+     # Formatting results with large Markdown
+     return f"""
+ # 🏁 BATTLE RESULTS
+
+ ## 🎯 Mission Target: {target_display}
+
+ ---
+ ## 👀 Player 1 `{res1_emoji}` | **Accuracy:** `{score1}%`
+
+ ## 👀 Player 2 `{res2_emoji}` | **Accuracy:** `{score2}%`
+
+ ---
+ # 🏆 WINNER: <span style="color: #ff4b4b; font-size: 40px;">{winner}</span>
+ """
+
+ # --- Gradio UI ---
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
+     gr.Markdown("# 🎙️ The AI Sequence Battle")
+
+     # Store the mission in a hidden state so we can still use it for scoring even when invisible
+     hidden_target = gr.State("")

      with gr.Row():
+         target_seq_ui = gr.Textbox(label="📒 Referee's Mission (Memorize this!)", interactive=False)
+         refresh_btn = gr.Button("🔄 New Mission")
+
+     # On load and on refresh, update both the UI and the State
+     demo.load(generate_challenge, outputs=[hidden_target, target_seq_ui])
+     refresh_btn.click(generate_challenge, outputs=[hidden_target, target_seq_ui])
+
+     with gr.Accordion("⚖️ Step 1: The Referee", open=True):
+         ref_audio = gr.Audio(sources=["microphone"], type="filepath", label="Record the Mission")
+         # Trigger hiding when audio is recorded
+         ref_audio.change(hide_mission, inputs=ref_audio, outputs=target_seq_ui)

+     with gr.Row():
+         with gr.Column():
+             gr.Markdown("### 👀 Player 1 (3-5s samples)")
+             p1_0 = gr.Audio(sources=["microphone"], type="filepath", label="Source 0")
+             p1_1 = gr.Audio(sources=["microphone"], type="filepath", label="Source 1")
+             p1_s = gr.Audio(sources=["microphone"], type="filepath", label="Silence 🤫")
+         with gr.Column():
+             gr.Markdown("### 👀 Player 2 (3-5s samples)")
+             p2_0 = gr.Audio(sources=["microphone"], type="filepath", label="Source 0")
+             p2_1 = gr.Audio(sources=["microphone"], type="filepath", label="Source 1")
+             p2_s = gr.Audio(sources=["microphone"], type="filepath", label="Silence 🤫")
+
+     btn_fight = gr.Button("🔥 REVEAL WINNER", variant="primary", size="lg")
+
+     # Using Markdown for large, styled text results
+     result_display = gr.Markdown("### Results will appear here after the battle!")
+
+     btn_fight.click(
+         play_game,
+         inputs=[hidden_target, ref_audio, p1_0, p1_1, p1_s, p2_0, p2_1, p2_s],
+         outputs=result_display
+     )

+ demo.launch()
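
The heart of the new post-processing is the majority-vote smoothing pass; below is a minimal standalone sketch of that step with made-up predictions, mirroring the loop in `post_process_to_emoji` (not part of the commit itself):

```python
# Each prediction is replaced by the most common value in its 3-step
# neighborhood, suppressing one-off "flicker" between classes.
preds = [0, 0, 2, 0, 0, 1, 1, 1]

smoothed = []
for i in range(len(preds)):
    neighborhood = preds[max(0, i - 1):i + 2]
    smoothed.append(max(set(neighborhood), key=neighborhood.count))

print(smoothed)  # [0, 0, 0, 0, 0, 1, 1, 1] (the lone 2 is voted out)
```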
requirements.txt CHANGED
@@ -2,4 +2,5 @@ gradio
  numpy
  librosa
  scikit-learn
- soundfile
+ soundfile
+ xgboost
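
For reference, the accuracy scores in `play_game` come from `difflib.SequenceMatcher` (which is why `difflib` joins `xgboost` as a dependency of the new app, though it ships with the standard library). A small illustration with made-up sequences of how a decoded string becomes a percentage:

```python
import difflib

# Hypothetical mission "1 _ 0 _ 1" versus a player's decoded "1_0";
# play_game maps both to numeric strings ("_" -> "2") before matching.
target = "1_0_1".replace("_", "2")   # "12021"
player = "1_0".replace("_", "2")     # "120"

# ratio() = 2 * matching_chars / total_chars = 2 * 3 / (5 + 3) = 0.75
score = round(difflib.SequenceMatcher(None, target, player).ratio() * 100, 1)
print(score)  # 75.0
```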