AIOmarRehan commited on
Commit
05b56c7
·
verified ·
1 Parent(s): dc4e2c1

Upload 7 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Notebook/Audio_Classification.ipynb filter=lfs diff=lfs merge=lfs -text
37
+ Results/Spectrogram_CNN_Audio_Classification.mp4 filter=lfs diff=lfs merge=lfs -text
Notebook/Audio_Classification.ipynb ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8cb22d1805365cd4f49be6a64c2423bdee83c0e733f559aa8522e548abeec0f8
3
+ size 27944834
Results/Spectrogram_CNN_Audio_Classification.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e0554281b204fb323d23c248d818442ec755b81763947b384c7564a4e0143be5
3
+ size 9934237
app/main.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI, UploadFile, File
from fastapi.responses import JSONResponse
import tempfile, shutil
from app.preprocess import preprocess_audio
from app.model import predict
import numpy as np

# FastAPI application exposing a single audio-classification endpoint
# (POST /predict, defined below).
app = FastAPI(title="General Audio Classifier")
10
@app.post("/predict")
async def predict_audio(file: UploadFile = File(...)):
    """Classify an uploaded audio file.

    The upload is written to a temporary ``.wav`` file, converted by
    ``preprocess_audio`` into one spectrogram image per 5-second chunk,
    and each image is classified with ``predict``.  Chunk predictions
    are combined by majority vote, with summed confidence breaking ties.

    Returns:
        JSONResponse with ``predicted_label``, its mean ``confidence``
        (over the chunks that voted for it), and the per-chunk
        ``all_predictions`` / ``all_confidences``.  Returns 400 for an
        upload that yields no chunks and 500 on any processing error.
    """
    import os
    from collections import Counter, defaultdict

    tmp_path = None
    try:
        # Persist the upload to disk: preprocess_audio expects a file path.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
            shutil.copyfileobj(file.file, tmp)
            tmp_path = tmp.name

        # Preprocess -> one spectrogram image per 5 s chunk.
        imgs = preprocess_audio(tmp_path)
        if not imgs:
            # Previously fell through to max() on an empty Counter -> 500.
            return JSONResponse(
                content={"error": "no audio data in upload"}, status_code=400
            )

        # Classify every chunk independently.
        all_preds = []
        all_confidences = []
        for img in imgs:
            label, confidence, probs = predict(img)
            all_preds.append(label)
            all_confidences.append(confidence)

        # Majority vote across chunks.
        counter = Counter(all_preds)
        max_count = max(counter.values())
        candidates = [k for k, v in counter.items() if v == max_count]

        if len(candidates) == 1:
            final_label = candidates[0]
        else:
            # Tie-breaker: the tied label with the larger summed confidence.
            confidence_sums = defaultdict(float)
            for label, conf in zip(all_preds, all_confidences):
                if label in candidates:
                    confidence_sums[label] += conf
            final_label = max(confidence_sums, key=confidence_sums.get)

        # Mean confidence over the chunks that voted for the final label.
        # Explicit float(): np.mean returns np.float64; keep the JSON
        # payload in plain Python types.
        final_confidence = float(np.mean(
            [c for lbl, c in zip(all_preds, all_confidences)
             if lbl == final_label]
        ))

        return JSONResponse(content={
            "predicted_label": final_label,
            "confidence": round(final_confidence, 3),
            "all_predictions": all_preds,
            "all_confidences": [round(c, 3) for c in all_confidences],
        })

    except Exception as e:
        # Service boundary: surface the failure to the client as a 500.
        return JSONResponse(content={"error": str(e)}, status_code=500)
    finally:
        # Always delete the temp file — the original leaked one per request
        # (NamedTemporaryFile was created with delete=False and never removed).
        if tmp_path is not None and os.path.exists(tmp_path):
            os.remove(tmp_path)
app/model.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import tensorflow as tf
import numpy as np

# Load trained model once at import time; every request reuses this instance.
model = tf.keras.models.load_model("saved_model/Audio_Model_Classification.h5")

# IMPORTANT: Keep same order as training (alphabetical default in TF).
# NOTE(review): the training label order is not visible here — if the
# notebook used a different ordering, predictions will be mislabeled; verify.
CLASS_NAMES = ["Baby Cry", "Chainsaw", "Clock Tick", "Cow", "Dog", "Fire Crackling", "Frog", "Helicopter", "Person Sneeze", "Pig", "Rain", "Rooster", "Sea Waves"]
10
def predict(img):
    """Classify one spectrogram image with the module-level Keras model.

    Parameters
    ----------
    img : PIL.Image
        Spectrogram image (RGBA, as produced by ``preprocess_audio``).

    Returns
    -------
    tuple
        ``(label, confidence, prob_dict)`` — the top class name, its
        probability as a plain float, and a ``{class_name: probability}``
        mapping over all classes.
    """
    # PIL image -> float32 array normalized to [0, 1].
    arr = np.array(img).astype("float32") / 255.0

    # Resize to the training input size.
    # NOTE(review): assumes the model was trained on 231x232 RGBA inputs
    # (a previous version skipped the resize and still ran) — confirm
    # against the training notebook.
    arr = tf.image.resize(arr, (231, 232))  # (231, 232, 4)

    # Add batch dimension: (1, 231, 232, 4).
    batch = np.expand_dims(arr, axis=0)

    # verbose=0 keeps Keras' per-call progress bar out of the server logs.
    probs = model.predict(batch, verbose=0)[0]

    class_idx = int(np.argmax(probs))
    confidence = float(np.max(probs))
    prob_dict = {CLASS_NAMES[i]: float(p) for i, p in enumerate(probs)}

    return CLASS_NAMES[class_idx], confidence, prob_dict
app/preprocess.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import librosa
import librosa.display
import numpy as np
import matplotlib.pyplot as plt
import io
from PIL import Image

# Audio parameters — must match the preprocessing used at training time.
SR = 16000                   # target sample rate (Hz)
N_FFT = 1024                 # STFT window size (samples)
HOP_LENGTH = 512             # hop between STFT frames (samples)
N_MELS = 128                 # number of Mel frequency bands
TARGET_DURATION = 5.0        # chunk length (seconds)
TARGET_LENGTH = int(TARGET_DURATION * SR)  # chunk length (samples)
16
def _render_chunk(chunk):
    """Render one fixed-length waveform chunk as an RGBA Mel-spectrogram image."""
    mel = librosa.feature.melspectrogram(
        y=chunk, sr=SR, n_fft=N_FFT, hop_length=HOP_LENGTH, n_mels=N_MELS
    )
    mel_db = librosa.power_to_db(mel, ref=np.max)

    # Draw the spectrogram off-screen and capture it as a PNG in memory.
    fig = plt.figure(figsize=(3, 3))
    librosa.display.specshow(mel_db, sr=SR, hop_length=HOP_LENGTH, cmap="magma")
    plt.axis("off")

    buf = io.BytesIO()
    plt.savefig(buf, format="png", bbox_inches="tight", pad_inches=0)
    plt.close(fig)
    buf.seek(0)

    return Image.open(buf).convert("RGBA")  # 4 channels


def preprocess_audio(file_path):
    """Convert an audio file into a list of RGBA Mel-spectrogram images.

    The signal is loaded as mono, peak-normalized, resampled to ``SR``
    if needed, and split into ``TARGET_DURATION``-second chunks (the
    last chunk is zero-padded to full length).  Each chunk becomes one
    spectrogram image.
    """
    signal, native_sr = librosa.load(file_path, sr=None, mono=True)

    # Peak-normalize amplitude (leave silence untouched to avoid 0/0).
    peak = np.abs(signal).max()
    if peak > 0:
        signal = signal / peak * 0.99

    # Bring the signal to the model's sample rate.
    if native_sr != SR:
        signal = librosa.resample(signal, orig_sr=native_sr, target_sr=SR)

    images = []
    for start in range(0, len(signal), TARGET_LENGTH):
        chunk = signal[start:start + TARGET_LENGTH]
        if len(chunk) < TARGET_LENGTH:
            chunk = np.pad(chunk, (0, TARGET_LENGTH - len(chunk)), mode="constant")
        images.append(_render_chunk(chunk))

    return images
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ #fastapi
2
+ #uvicorn
3
+ tensorflow
4
+ numpy
5
+ librosa
6
+ matplotlib
7
+ soundfile
8
+ #python-multipart
9
+ gradio
10
+ Pillow
saved_model/Audio_Model_Classification.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ceef1269f64afc26d31dc35e4bcacf68c2d91181aa28afeecec0e2403aabf739
3
+ size 22083448