AIOmarRehan committed on
Commit
169886c
Β·
verified Β·
1 Parent(s): 6fa015b

Upload Old app.py

Browse files
Files changed (1) hide show
  1. Old app.py +112 -0
Old app.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ from PIL import Image
4
+ from app.preprocess import preprocess_audio
5
+ from app.model import predict
6
+ from collections import Counter, defaultdict
7
+ import librosa
8
+
9
+
10
# IMAGE HANDLING
def safe_load_image(img):
    """Coerce *img* into a PIL RGBA image, or return None.

    Gradio may deliver either a PIL image or a raw numpy array depending
    on the component configuration, so both forms are accepted here and
    normalised to a single representation.
    """
    if img is None:
        return None

    # Numpy array -> PIL image before any mode conversion.
    if isinstance(img, np.ndarray):
        img = Image.fromarray(img)

    # Force RGBA so an alpha channel, if present, survives downstream.
    return img.convert("RGBA")
26
+
27
+
28
# PROCESS SPECTROGRAM IMAGE
def process_image_input(img):
    """Classify a single spectrogram image.

    Returns a tuple of (label, confidence rounded to 3 decimal places,
    raw probability output from the model).
    """
    normalized = safe_load_image(img)
    label, conf, probs = predict(normalized)
    return label, round(conf, 3), probs
33
+
34
+
35
# PROCESS RAW AUDIO
def _majority_vote(labels, confs):
    """Pick the winning label from per-chunk predictions.

    Majority vote over *labels*; a tie is broken in favour of the
    candidate with the larger summed confidence.
    """
    counter = Counter(labels)
    top_count = max(counter.values())
    candidates = [lbl for lbl, n in counter.items() if n == top_count]

    if len(candidates) == 1:
        return candidates[0]

    conf_sums = defaultdict(float)
    for lbl, conf in zip(labels, confs):
        if lbl in candidates:
            conf_sums[lbl] += conf
    return max(conf_sums, key=conf_sums.get)


def process_audio_input(audio_path):
    """Classify a raw audio file chunk-by-chunk.

    The audio is preprocessed into mel-spectrogram chunks, each chunk is
    classified independently, and the final label is chosen by majority
    vote (ties broken by summed confidence).

    Returns:
        (final_label, mean confidence over winning chunks rounded to 3 dp,
         per-chunk labels, per-chunk confidences rounded to 3 dp)
    """
    imgs = preprocess_audio(audio_path)  # returns list of PIL RGBA images

    # Guard: an empty chunk list (e.g. audio shorter than one analysis
    # window) would make max() below raise ValueError on an empty sequence.
    if not imgs:
        return "Unknown", 0.0, [], []

    all_preds = []
    all_confs = []
    for img in imgs:
        label, conf, _probs = predict(img)  # per-chunk probs are unused here
        all_preds.append(label)
        all_confs.append(conf)

    final_label = _majority_vote(all_preds, all_confs)

    # Mean confidence computed only over the chunks that voted for the winner.
    final_conf = float(
        np.mean([c for lbl, c in zip(all_preds, all_confs) if lbl == final_label])
    )

    return final_label, round(final_conf, 3), all_preds, [round(c, 3) for c in all_confs]
69
+
70
+
71
# MAIN CLASSIFIER
def classify(audio_path, image):
    """Dispatch to image- or audio-based classification.

    A spectrogram image takes priority over an audio file; when neither
    input is supplied, a plain instruction string is returned instead of
    a result dict.
    """
    # Spectrogram image path.
    if image is not None:
        label, conf, probs = process_image_input(image)
        return {
            "Final Label": label,
            "Confidence": conf,
            "Details": probs
        }

    # Raw audio path.
    if audio_path is not None:
        label, conf, chunk_labels, chunk_confs = process_audio_input(audio_path)
        return {
            "Final Label": label,
            "Confidence": conf,
            "All Chunk Labels": chunk_labels,
            "All Chunk Confidences": chunk_confs
        }

    return "Please upload an audio file OR a spectrogram image."
94
+
95
+
96
# GRADIO UI
# Two optional inputs feed a single classify() call; the JSON output
# mirrors whichever dict (or fallback instruction string) classify()
# returns for the supplied input combination.
interface = gr.Interface(
    fn=classify,
    inputs=[
        # type="filepath" hands classify() a path string, not decoded audio.
        gr.Audio(type="filepath", label="Upload Audio (WAV/MP3)"),
        # type="pil" yields a PIL image; safe_load_image still normalises it.
        gr.Image(type="pil", label="Upload Spectrogram Image (PNG RGBA Supported)")
    ],
    outputs=gr.JSON(label="Prediction Results"),
    title="General Audio Classifier (Audio + Spectrogram Support)",
    description=(
        "Upload a raw audio file OR a spectrogram image.\n"
        "If audio β†’ model preprocesses into mel-spectrogram chunks.\n"
        "If image β†’ model classifies the spectrogram directly.\n"
    ),
)

# NOTE(review): launch() runs at import time; if this module is ever
# imported rather than executed, consider an `if __name__ == "__main__":`
# guard — confirm against the deployment entry point first.
interface.launch()