| """ |
| ADMC - AI Music Detection API |
| Hugging Face Space (Gradio + FastAPI) |
| v2 - Fix: modello corretto + compatibilita Gradio 5+ |
| |
| Correzioni rispetto a v1: |
| 1. Modello corretto: AI-Music-Detection/ai_music_detection_large_60s |
| (il precedente motheecreator/ai-music-detection non esiste) |
| 2. allow_flagging rimosso (deprecato in Gradio 4+, rimpiazzato da flagging_mode) |
| """ |
|
|
| import gradio as gr |
| import numpy as np |
| import torch |
| from fastapi import FastAPI, Request, HTTPException |
| from fastapi.responses import JSONResponse |
| from transformers import pipeline |
| import tempfile |
| import os |
|
|
| |
| |
| |
# Hugging Face model ID for the audio classifier; replaces an earlier,
# nonexistent model ID (see module docstring).
MODEL_ID = "AI-Music-Detection/ai_music_detection_large_60s"

print("Loading model: " + MODEL_ID)
# Global classifier handle. Stays None when loading fails; analyze_audio()
# then falls back to a librosa-based spectral heuristic.
classifier = None
try:
    classifier = pipeline(
        "audio-classification",
        model=MODEL_ID,
        # Use GPU 0 when CUDA is available, otherwise run on CPU (-1).
        device=0 if torch.cuda.is_available() else -1,
    )
    print("Model loaded successfully")
except Exception as e:
    # Broad catch is deliberate at this startup boundary: the Space should
    # still come up (with the heuristic fallback) even if the download fails.
    print("Warning: Could not load model (" + str(e) + "). Using fallback heuristic.")
|
|
|
|
def analyze_audio(audio_path):
    """Analyze an audio file and return [{"label": "AI", ...}, {"label": "Human", ...}].

    Tries the transformers classifier first; when it is unavailable or
    fails, falls back to a crude librosa spectral-variability heuristic,
    and finally to a neutral 0.5 score if even that fails.
    """
    # Preferred path: the model loaded at startup (module-level `classifier`).
    if classifier is not None:
        try:
            predictions = classifier(audio_path, top_k=2)
            probability_ai = 0.5  # neutral default if no label matches
            for pred in predictions:
                tag = pred["label"].upper()
                if "LABEL_1" in tag or "AI" in tag or "FAKE" in tag:
                    # Label denotes the AI/fake class: take its score directly.
                    probability_ai = float(pred["score"])
                    break
                if "LABEL_0" in tag or "HUMAN" in tag or "REAL" in tag:
                    # Label denotes the human/real class: complement its score.
                    probability_ai = 1.0 - float(pred["score"])
                    break
            return [
                {"label": "AI", "score": round(probability_ai, 4)},
                {"label": "Human", "score": round(1.0 - probability_ai, 4)},
            ]
        except Exception as err:
            # Fall through to the heuristic below on any inference failure.
            print("Inference error: " + str(err))

    # Fallback heuristic: low spectral variability reads as "more likely AI".
    try:
        import librosa

        samples, rate = librosa.load(audio_path, sr=22050, duration=30.0, mono=True)
        spectrogram = librosa.feature.melspectrogram(y=samples, sr=rate, n_mels=128, fmax=8000)
        spectrogram_db = librosa.power_to_db(spectrogram, ref=np.max)
        spread = float(np.std(spectrogram_db))
        magnitude = float(np.mean(np.abs(spectrogram_db)))
        variability = spread / (magnitude + 1e-6)
        # Clamp the mapped score into [0, 1].
        probability_ai = max(0.0, min(1.0, 1.0 - (variability / 3.0)))
    except Exception:
        # Librosa missing or decode failure: report a neutral verdict.
        probability_ai = 0.5

    return [
        {"label": "AI", "score": round(probability_ai, 4)},
        {"label": "Human", "score": round(1.0 - probability_ai, 4)},
    ]
|
|
|
|
def gradio_analyze(audio_file):
    """Gradio callback: run the detector on the uploaded file and format a verdict string."""
    if audio_file is None:
        return "Nessun file caricato."

    scores = analyze_audio(audio_file)
    # Pull the AI probability out of the result list; neutral 0.5 if absent.
    ai_probability = next((s["score"] for s in scores if s["label"] == "AI"), 0.5)

    if ai_probability > 0.5:
        verdict = "Probabile AI"
    else:
        verdict = "Probabile umano (autorialita' umana)"

    ai_pct = round(ai_probability * 100, 1)
    human_pct = round((1 - ai_probability) * 100, 1)
    return (
        f"{verdict}\n\n"
        f"AI Score: {ai_pct}%\n"
        f"Human Score: {human_pct}%\n\n"
        "Soglia ADMC: 50% (configurabile nel plugin WordPress)"
    )
|
|
|
|
| |
# Gradio UI: a single audio upload in, a textual ADMC verdict out.
demo = gr.Interface(
    fn=gradio_analyze,
    inputs=gr.Audio(type="filepath", label="Carica brano musicale (MP3/WAV/FLAC)"),
    outputs=gr.Textbox(label="Risultato analisi ADMC"),
    title="ADMC - Rilevamento Autorialita AI nella Musica",
    description=(
        "Analizza un brano musicale per rilevare se e stato generato da AI o creato da un umano. "
        "Parte del sistema di certificazione ADMC - Artigiani della Musica."
    ),
    # flagging_mode replaces the allow_flagging kwarg deprecated in Gradio 4+
    # (see module docstring).
    flagging_mode="never",
)

# Serve the Gradio UI at "/" while keeping the FastAPI app available for the
# extra JSON endpoints (/analyze, /health) registered below.
app = gr.mount_gradio_app(FastAPI(), demo, path="/")
|
|
|
|
@app.post("/analyze")
async def api_analyze(request: Request):
    """
    POST /analyze
    Body: raw audio bytes
    Content-Type: audio/mpeg | audio/wav | audio/flac | audio/ogg
    Returns: [{"label": "AI", "score": 0.87}, {"label": "Human", "score": 0.13}]
    """
    # Pick a temp-file extension from the declared content type; unknown or
    # missing types default to .mp3.
    suffix_by_type = {
        "audio/mpeg": ".mp3",
        "audio/wav": ".wav",
        "audio/x-wav": ".wav",
        "audio/flac": ".flac",
        "audio/ogg": ".ogg",
        "audio/aiff": ".aiff",
    }
    declared_type = request.headers.get("content-type", "").split(";")[0].strip()
    suffix = suffix_by_type.get(declared_type, ".mp3")

    payload = await request.body()
    if not payload:
        raise HTTPException(status_code=400, detail="Nessun file audio ricevuto.")
    if len(payload) > 100 * 1024 * 1024:
        raise HTTPException(status_code=413, detail="File troppo grande (max 100 MB).")

    # analyze_audio() takes a path, so stage the bytes in a temporary file.
    # delete=False because the file must survive past the with-block; it is
    # removed in the finally clause below.
    with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as handle:
        handle.write(payload)
        temp_path = handle.name

    try:
        scores = analyze_audio(temp_path)
    except Exception as err:
        raise HTTPException(status_code=500, detail="Errore analisi: " + str(err))
    finally:
        # Always clean up the staged file, success or failure.
        if os.path.exists(temp_path):
            os.unlink(temp_path)

    return JSONResponse(content=scores)
|
|
|
|
@app.get("/health")
async def health():
    """Liveness probe: report model identity, load state, and GPU availability."""
    status_payload = {"status": "ok", "model": MODEL_ID}
    status_payload["loaded"] = classifier is not None
    status_payload["gpu"] = torch.cuda.is_available()
    return status_payload
|
|