tventurella committed · verified
Commit ec7f247 · 1 Parent(s): ef602ad

Upload 5 files
best_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6505900f51cdd56c9be6151b7e4f17d1f56ca4c4ef32def283e8f2593d93ef20
+ size 2153593
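(This three-line stub is a Git LFS pointer, not the binary itself: the repo records only the LFS spec version, the SHA-256 object ID, and the payload size in bytes, while the actual file lives in LFS storage. The two `.npy` embedding files below use the same format.)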
clap_tag_embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77a1c2e79348506a65d66721c1c1ef4653f390c592919cfc946f2c29392b750b
+ size 27072640
mulan_tag_embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a734a90cab5a237c649201fd067cecaf5bbd8dcc88361e10821a5469474351fb
+ size 27072640
music_tagger_gui.py ADDED
@@ -0,0 +1,554 @@
+ """
+ Gradio Music Tagging GUI
+
+ A simple web interface for tagging music using:
+ 1. Zero-Shot: CLAP + MuLan models with pre-computed tag embeddings
+ 2. MTG-Jamendo: Trained MERT classifier
+
+ Usage:
+     python music_tagger_gui.py
+
+ Requirements:
+     pip install -r requirements_gui.txt
+ """
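+ # NOTE: requirements_gui.txt is not part of this commit. Judging from the
+ # imports below, it would need at least: torch, numpy, librosa, gradio,
+ # transformers, and muq (MuQ-MuLan); librosa additionally relies on
+ # soundfile/audioread for decoding compressed formats.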
+
+ import os
+ import torch
+ import numpy as np
+ import librosa
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import gradio as gr
+ from pathlib import Path
+
+
+ # ============================================================
+ # Configuration
+ # ============================================================
+ class Config:
+     """Configuration for both inference methods."""
+
+     # Device
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+
+     # Zero-Shot paths (relative to script directory)
+     clap_embeddings_path = "clap_tag_embeddings.npy"
+     mulan_embeddings_path = "mulan_tag_embeddings.npy"
+     tag_names_path = "musiccaps_tag_names.txt"
+
+     # MTG-Jamendo paths
+     mtg_checkpoint_path = "best_model.pt"
+
+     # MERT settings (for MTG-Jamendo)
+     mert_model_name = "m-a-p/MERT-v1-95M"
+     mert_layer = 11
+     mert_feature_dim = 768
+     mert_sample_rate = 24000
+     max_duration = 30  # seconds
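+     # NOTE: max_duration is only enforced on the MTG-Jamendo/MERT path
+     # (tag_audio_mtg_jamendo truncates the waveform); the zero-shot path
+     # embeds the full file.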
+
+
+ config = Config()
+
+ # Get script directory for relative paths
+ SCRIPT_DIR = Path(__file__).parent.resolve()
+
+
+ # ============================================================
+ # Model Classes
+ # ============================================================
+ class GenreClassifier(nn.Module):
+     """Classifier for MTG-Jamendo trained model."""
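+     # A small MLP head over pooled MERT features:
+     # input_dim -> hidden_dim -> hidden_dim // 2 -> num_classes,
+     # with ReLU and dropout between the linear layers.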
+
+     def __init__(self, input_dim=768, num_classes=50, hidden_dim=512, dropout=0.3):
+         super().__init__()
+         self.classifier = nn.Sequential(
+             nn.Linear(input_dim, hidden_dim),
+             nn.ReLU(),
+             nn.Dropout(dropout),
+             nn.Linear(hidden_dim, hidden_dim // 2),
+             nn.ReLU(),
+             nn.Dropout(dropout),
+             nn.Linear(hidden_dim // 2, num_classes)
+         )
+
+     def forward(self, x):
+         return self.classifier(x)
+
+
+ # ============================================================
+ # Global Model Cache (lazy loading)
+ # ============================================================
+ class ModelCache:
+     """Lazy-loaded model cache to avoid loading models until needed."""
+
+     def __init__(self):
+         self._zero_shot_models = None
+         self._zero_shot_embeddings = None
+         self._zero_shot_tag_names = None
+         self._mtg_model = None
+         self._mtg_tag_names = None
+         self._mert_model = None
+         self._mert_processor = None
+
+     def get_zero_shot_models(self):
+         """Load and cache Zero-Shot models (CLAP + MuLan)."""
+         if self._zero_shot_models is None:
+             print("Loading Zero-Shot models (this may take a moment)...")
+
+             # Import here to avoid loading if not needed
+             try:
+                 from muq import MuQMuLan
+                 from transformers import ClapModel, ClapProcessor
102
+ except ImportError as e:
103
+ raise ImportError(
104
+ f"Missing dependencies for Zero-Shot tagging: {e}\n"
105
+ "Install with: pip install muq laion-clap transformers"
106
+ )
+
+             # Load MuLan
+             print("  Loading MuQ-MuLan...")
+             mulan_model = MuQMuLan.from_pretrained("OpenMuQ/MuQ-MuLan-large")
+             mulan_model = mulan_model.to(config.device).eval()
+
+             # Load CLAP
+             print("  Loading CLAP...")
+             clap_model = ClapModel.from_pretrained("laion/larger_clap_music_and_speech")
+             clap_model = clap_model.to(config.device).eval()
+             clap_processor = ClapProcessor.from_pretrained("laion/larger_clap_music_and_speech")
+
+             self._zero_shot_models = (mulan_model, clap_model, clap_processor)
+             print("  Zero-Shot models loaded!")
+
+         return self._zero_shot_models
+
+     def get_zero_shot_embeddings(self):
+         """Load and cache pre-computed tag embeddings."""
+         if self._zero_shot_embeddings is None:
+             clap_path = SCRIPT_DIR / config.clap_embeddings_path
+             mulan_path = SCRIPT_DIR / config.mulan_embeddings_path
+
+             if not clap_path.exists() or not mulan_path.exists():
+                 raise FileNotFoundError(
+                     f"Pre-computed embeddings not found!\n"
+                     f"Expected:\n"
+                     f"  - {clap_path}\n"
+                     f"  - {mulan_path}\n\n"
+                     f"Run create_embeddings.py first to generate these files."
+                 )
+
+             print("Loading pre-computed embeddings...")
+             clap_embeddings = np.load(str(clap_path))
+             mulan_embeddings = np.load(str(mulan_path))
+
+             self._zero_shot_embeddings = (clap_embeddings, mulan_embeddings)
+             print(f"  Loaded embeddings: CLAP {clap_embeddings.shape}, MuLan {mulan_embeddings.shape}")
+
+         return self._zero_shot_embeddings
+
+     def get_zero_shot_tag_names(self):
+         """Load and cache tag names for Zero-Shot tagging."""
+         if self._zero_shot_tag_names is None:
+             tag_path = SCRIPT_DIR / config.tag_names_path
+
+             if not tag_path.exists():
+                 raise FileNotFoundError(
+                     f"Tag names file not found: {tag_path}\n"
+                     "Run create_embeddings.py first to generate this file."
+                 )
+
+             with open(tag_path, 'r', encoding='utf-8') as f:
+                 self._zero_shot_tag_names = [line.strip() for line in f if line.strip()]
+
+             print(f"  Loaded {len(self._zero_shot_tag_names)} tag names")
+
+         return self._zero_shot_tag_names
+
+     def get_mtg_model(self):
+         """Load and cache MTG-Jamendo trained model."""
+         if self._mtg_model is None:
+             checkpoint_path = SCRIPT_DIR / config.mtg_checkpoint_path
+
+             if not checkpoint_path.exists():
+                 raise FileNotFoundError(
+                     f"MTG-Jamendo checkpoint not found: {checkpoint_path}\n"
+                     "Train the model first using mtg_jamendo_training.py"
+                 )
+
+             print("Loading MTG-Jamendo model...")
+             checkpoint = torch.load(str(checkpoint_path), map_location=config.device, weights_only=False)
+
+             # Get tag names (handle both old and new keys)
+             self._mtg_tag_names = checkpoint.get('tag_names', checkpoint.get('genre_names', []))
+
+             # Initialize and load model
+             model = GenreClassifier(
+                 input_dim=config.mert_feature_dim,
+                 num_classes=len(self._mtg_tag_names)
+             )
+             model.load_state_dict(checkpoint['model_state_dict'])
+             model = model.to(config.device).eval()
+
+             self._mtg_model = model
+             print(f"  Loaded model with {len(self._mtg_tag_names)} tags")
+             print(f"  Validation accuracy: {checkpoint.get('val_acc', 0)*100:.2f}%")
+
+         return self._mtg_model, self._mtg_tag_names
+
+     def get_mert_model(self):
+         """Load and cache MERT model for feature extraction."""
+         if self._mert_model is None:
+             try:
+                 from transformers import Wav2Vec2FeatureExtractor, AutoModel
+             except ImportError as e:
+                 raise ImportError(
+                     f"Missing transformers library: {e}\n"
+                     "Install with: pip install transformers"
+                 )
+
+             print("Loading MERT model...")
+             self._mert_processor = Wav2Vec2FeatureExtractor.from_pretrained(config.mert_model_name)
+             self._mert_model = AutoModel.from_pretrained(config.mert_model_name, trust_remote_code=True)
+             self._mert_model = self._mert_model.to(config.device).eval()
+             print("  MERT model loaded!")
+
+         return self._mert_model, self._mert_processor
+
+
+ # Global model cache
+ model_cache = ModelCache()
+
+
+ # ============================================================
+ # Inference Functions
+ # ============================================================
+ @torch.no_grad()
+ def tag_audio_zero_shot(audio_path: str, top_k: int = 20, normalization: str = "individual"):
+     """
+     Tag audio using Zero-Shot CLAP + MuLan approach.
+
+     Args:
+         audio_path: Path to audio file
+         top_k: Number of top tags to return
+         normalization: "mulan_only", "global", or "individual"
+
+     Returns:
+         List of (tag_name, confidence) tuples
+     """
+     # Load models and embeddings
+     mulan_model, clap_model, clap_processor = model_cache.get_zero_shot_models()
+     clap_embeddings, mulan_embeddings = model_cache.get_zero_shot_embeddings()
+     tag_names = model_cache.get_zero_shot_tag_names()
+
+     # Ensure embedding/tag counts match (truncate all three to the shortest,
+     # so top-k indices can never run past tag_names)
+     min_len = min(len(tag_names), len(clap_embeddings), len(mulan_embeddings))
+     tag_names = tag_names[:min_len]
+     clap_embeddings = clap_embeddings[:min_len]
+     mulan_embeddings = mulan_embeddings[:min_len]
+
+     # Embed audio with MuLan (24kHz)
+     wav_mulan, _ = librosa.load(audio_path, sr=24000, mono=True)
+     wavs = torch.tensor(wav_mulan, dtype=torch.float32).unsqueeze(0).to(config.device)
+     mulan_audio_embed = mulan_model(wavs=wavs)
+
+     # Embed audio with CLAP (48kHz)
+     wav_clap, _ = librosa.load(audio_path, sr=48000, mono=True)
+     inputs = clap_processor(audio=wav_clap, sampling_rate=48000, return_tensors="pt").to(config.device)
+     clap_audio_embed = clap_model.get_audio_features(**inputs)
+
+     # Convert embeddings to tensors
+     mulan_text_e = torch.tensor(mulan_embeddings, dtype=torch.float32).to(config.device)
+     clap_text_e = torch.tensor(clap_embeddings, dtype=torch.float32).to(config.device)
+
+     # Calculate similarities
+     mulan_sims = F.cosine_similarity(mulan_audio_embed, mulan_text_e, dim=1)
+     clap_sims = F.cosine_similarity(clap_audio_embed, clap_text_e, dim=1)
+
+     # Apply normalization strategy
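+     # (CLAP and MuLan cosine similarities occupy different numeric ranges, so
+     # "individual" min-max rescales each model's scores to [0, 1] before the
+     # 50/50 average, while "global" rescales both against a shared min/max.)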
+     if normalization == "mulan_only":
+         combined = mulan_sims
+     elif normalization == "global":
+         all_sims = torch.cat([mulan_sims, clap_sims])
+         g_min, g_max = all_sims.min(), all_sims.max()
+         mulan_norm = (mulan_sims - g_min) / (g_max - g_min + 1e-8)
+         clap_norm = (clap_sims - g_min) / (g_max - g_min + 1e-8)
+         combined = 0.5 * mulan_norm + 0.5 * clap_norm
+     else:  # individual
+         mulan_norm = (mulan_sims - mulan_sims.min()) / (mulan_sims.max() - mulan_sims.min() + 1e-8)
+         clap_norm = (clap_sims - clap_sims.min()) / (clap_sims.max() - clap_sims.min() + 1e-8)
+         combined = 0.5 * mulan_norm + 0.5 * clap_norm
+
+     # Get top predictions
+     top_scores, top_idx = torch.topk(combined, k=min(top_k, len(tag_names)))
+
+     predictions = []
+     for i, idx in enumerate(top_idx):
+         tag = tag_names[idx.item()]
+         score = top_scores[i].item()
+         predictions.append((tag, score))
+
+     return predictions
+
+
+ @torch.no_grad()
+ def tag_audio_mtg_jamendo(audio_path: str, top_k: int = 10):
+     """
+     Tag audio using trained MTG-Jamendo MERT classifier.
+
+     Args:
+         audio_path: Path to audio file
+         top_k: Number of top tags to return
+
+     Returns:
+         List of (tag_name, confidence) tuples
+     """
+     # Load models
+     model, tag_names = model_cache.get_mtg_model()
+     mert_model, processor = model_cache.get_mert_model()
+
+     # Load and preprocess audio
+     wav, sr = librosa.load(audio_path, sr=config.mert_sample_rate, mono=True)
+
+     # Limit to max duration
+     max_samples = config.mert_sample_rate * config.max_duration
+     if len(wav) > max_samples:
+         wav = wav[:max_samples]
+
+     # Extract MERT features
+     inputs = processor(wav, sampling_rate=config.mert_sample_rate, return_tensors="pt")
+     inputs = {k: v.to(config.device) for k, v in inputs.items()}
+
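+     # Take hidden states from the configured MERT layer and mean-pool over
+     # time to get a single 768-d clip-level feature vector.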
+     outputs = mert_model(**inputs, output_hidden_states=True)
+     embeddings = outputs.hidden_states[config.mert_layer]
+     features = embeddings.mean(dim=1).squeeze(0)
+
+     # Predict
+     logits = model(features.unsqueeze(0))
+     probs = torch.softmax(logits, dim=1).squeeze(0).cpu().numpy()
+
+     # Get top predictions
+     top_indices = np.argsort(probs)[::-1][:top_k]
+     predictions = [(tag_names[i], float(probs[i])) for i in top_indices]
+
+     return predictions
+
+
+ # ============================================================
+ # Gradio Interface Functions
+ # ============================================================
+ def format_predictions(predictions: list, method: str) -> str:
+     """Format predictions as a readable string."""
+     if not predictions:
+         return "No predictions available."
+
+     lines = [f"## {method} Results\n"]
+
+     for i, (tag, score) in enumerate(predictions, 1):
+         # Create a visual bar
+         bar_length = int(score * 30)
+         bar = "█" * bar_length + "░" * (30 - bar_length)
+         lines.append(f"{i:2d}. **{tag}** — {score*100:.1f}% `{bar}`")
+
+     return "\n".join(lines)
+
+
+ def analyze_audio(audio_file, method: str, normalization: str, top_k: int):
+     """
+     Main analysis function called by Gradio interface.
+
+     Args:
+         audio_file: Uploaded audio file path
+         method: "Zero-Shot (CLAP + MuLan)" or "MTG-Jamendo (MERT)"
+         normalization: Normalization strategy for Zero-Shot
+         top_k: Number of top tags to show
+
+     Returns:
+         Formatted prediction results
+     """
+     if audio_file is None:
+         return "Please upload an audio file."
+
+     try:
+         if method == "Zero-Shot (CLAP + MuLan)":
+             norm_map = {
+                 "Individual (recommended)": "individual",
+                 "Global": "global",
+                 "MuLan Only": "mulan_only"
+             }
+             predictions = tag_audio_zero_shot(
+                 audio_file,
+                 top_k=int(top_k),
+                 normalization=norm_map.get(normalization, "individual")
+             )
+             return format_predictions(predictions, "Zero-Shot Tagging")
+
+         elif method == "MTG-Jamendo (MERT)":
+             predictions = tag_audio_mtg_jamendo(audio_file, top_k=int(top_k))
+             return format_predictions(predictions, "MTG-Jamendo Tagging")
+
+         else:
+             return f"Unknown method: {method}"
+
+     except FileNotFoundError as e:
+         return f"**Error: Missing Required Files**\n\n{str(e)}"
+     except ImportError as e:
+         return f"**Error: Missing Dependencies**\n\n{str(e)}"
+     except Exception as e:
+         return f"**Error during analysis:**\n\n{str(e)}"
+
+
+ def check_available_methods():
+     """Check which methods are available based on existing files."""
+     available = []
+     messages = []
+
+     # Check Zero-Shot files
+     clap_exists = (SCRIPT_DIR / config.clap_embeddings_path).exists()
+     mulan_exists = (SCRIPT_DIR / config.mulan_embeddings_path).exists()
+     tags_exists = (SCRIPT_DIR / config.tag_names_path).exists()
+
+     if clap_exists and mulan_exists and tags_exists:
+         available.append("Zero-Shot (CLAP + MuLan)")
+     else:
+         missing = []
+         if not clap_exists:
+             missing.append("clap_tag_embeddings.npy")
+         if not mulan_exists:
+             missing.append("mulan_tag_embeddings.npy")
+         if not tags_exists:
+             missing.append("musiccaps_tag_names.txt")
+         messages.append(f"Zero-Shot: Missing {', '.join(missing)}")
+
+     # Check MTG-Jamendo checkpoint
+     if (SCRIPT_DIR / config.mtg_checkpoint_path).exists():
+         available.append("MTG-Jamendo (MERT)")
+     else:
+         messages.append(f"MTG-Jamendo: Missing {config.mtg_checkpoint_path}")
+
+     return available, messages
+
+
+ def create_interface():
+     """Create and configure the Gradio interface."""
+     available_methods, status_messages = check_available_methods()
+
+     # Build status message
+     if available_methods:
+         status = f"**Available methods:** {', '.join(available_methods)}"
+     else:
+         status = "**Warning:** No tagging methods available!"
+
+     if status_messages:
+         status += "\n\n**Missing files:**\n" + "\n".join(f"- {m}" for m in status_messages)
+
+     # Default method
+     default_method = available_methods[0] if available_methods else "Zero-Shot (CLAP + MuLan)"
+
+     with gr.Blocks(title="Music Tagger", theme=gr.themes.Soft()) as interface:
+         gr.Markdown("""
+         # Music Auto-Tagger
+
+         Upload a song to analyze it with AI-powered music tagging models.
+
+         **Two methods available:**
+         - **Zero-Shot (CLAP + MuLan):** Uses ~1,300 tags from MusicCaps without training
+         - **MTG-Jamendo (MERT):** Uses a trained classifier for genre/instrument/mood tags
+         """)
+
+         gr.Markdown(status)
+
+         with gr.Row():
+             with gr.Column(scale=1):
+                 audio_input = gr.Audio(
+                     label="Upload Audio File",
+                     type="filepath",
+                     sources=["upload", "microphone"]
+                 )
+
+                 method_dropdown = gr.Dropdown(
+                     choices=["Zero-Shot (CLAP + MuLan)", "MTG-Jamendo (MERT)"],
+                     value=default_method,
+                     label="Tagging Method",
+                     info="Choose which model to use for tagging"
+                 )
+
+                 normalization_dropdown = gr.Dropdown(
+                     choices=["Individual (recommended)", "Global", "MuLan Only"],
+                     value="Individual (recommended)",
+                     label="Normalization (Zero-Shot only)",
+                     info="How to combine CLAP and MuLan scores",
+                     visible=True
+                 )
+
+                 top_k_slider = gr.Slider(
+                     minimum=5,
+                     maximum=50,
+                     value=15,
+                     step=5,
+                     label="Number of Tags",
+                     info="How many top tags to show"
+                 )
+
+                 analyze_btn = gr.Button("Analyze", variant="primary", size="lg")
+
+             with gr.Column(scale=2):
+                 output_text = gr.Markdown(
+                     label="Results",
+                     value="Upload an audio file and click 'Analyze' to see predictions."
+                 )
+
+         # Show/hide normalization based on method
+         def update_normalization_visibility(method):
+             return gr.update(visible=(method == "Zero-Shot (CLAP + MuLan)"))
+
+         method_dropdown.change(
+             fn=update_normalization_visibility,
+             inputs=[method_dropdown],
+             outputs=[normalization_dropdown]
+         )
+
+         # Analyze button
+         analyze_btn.click(
+             fn=analyze_audio,
+             inputs=[audio_input, method_dropdown, normalization_dropdown, top_k_slider],
+             outputs=[output_text]
+         )
+
+         gr.Markdown("""
+         ---
+         **Tips:**
+         - Supported formats: WAV, MP3, FLAC, OGG, and more
+         - Audio is automatically resampled to the required sample rate
+         - First analysis may take longer as models are loaded into memory
+         """)
+
+     return interface
+
+
+ # ============================================================
+ # Main Entry Point
+ # ============================================================
+ if __name__ == "__main__":
+     print("=" * 60)
+     print("Music Tagger GUI")
+     print("=" * 60)
+     print(f"Device: {config.device}")
+     print(f"Script directory: {SCRIPT_DIR}")
+
+     # Check available methods
+     available, messages = check_available_methods()
+     print(f"\nAvailable methods: {available if available else 'None'}")
+     if messages:
+         print("Status messages:")
+         for msg in messages:
+             print(f"  - {msg}")
+
+     # Launch interface
+     print("\nLaunching Gradio interface...")
+     interface = create_interface()
+     interface.launch(
+         share=False,  # Set to True to create a public link
+         server_name="0.0.0.0",  # Allow external connections
+         server_port=7860,
+         show_error=True
+     )
musiccaps_tag_names.txt ADDED
The diff for this file is too large to render. See raw diff