# Source: Hugging Face Space file "main.py" (uploaded by user Janiopi, 4.97 kB).
# NOTE(review): the lines below were HF viewer page chrome ("Create main.py",
# "raw / history / blame") accidentally pasted into the file; kept here as a
# comment so the module remains valid Python.
# app.py - API FastAPI en Hugging Face Space
from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from transformers import pipeline
import tempfile
import os
import uvicorn
import gradio as gr
from threading import Thread
# FastAPI application instance.
app = FastAPI(title="Musical Instrument Detection API", version="1.0.0")

# Open CORS policy so the Android client (any origin) can call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the audio-classification model once at import time. On failure the
# app still starts; `classifier` stays None and endpoints report the outage.
try:
    classifier = pipeline("audio-classification", model="Janiopi/detector_de_instrumentos_v1")
    print("✅ Modelo cargado exitosamente")
except Exception as e:
    print(f"❌ Error cargando modelo: {e}")
    classifier = None
@app.get("/")
async def root():
    """Service descriptor: name, online status, model state, endpoint map."""
    endpoint_map = {
        "detect": "/detect (POST)",
        "health": "/health (GET)",
    }
    return {
        "message": "Musical Instrument Detection API",
        "status": "online",
        "model_loaded": classifier is not None,
        "endpoints": endpoint_map,
    }
@app.get("/health")
async def health_check():
    """Liveness probe: reports whether the service and the model are up."""
    model_ready = classifier is not None
    return {
        "status": "ok",
        "model_loaded": model_ready,
        "message": "API funcionando correctamente",
    }
@app.post("/detect")
async def detect_instrument(audio: UploadFile = File(...)):
    """Detect musical instruments in an uploaded audio file.

    Returns a JSON payload with the classifier's labels and rounded scores.

    Raises:
        HTTPException 503: the model failed to load at startup.
        HTTPException 400: the upload is not an audio file.
        HTTPException 500: unexpected error while processing the audio.
    """
    if classifier is None:
        raise HTTPException(status_code=503, detail="Modelo no disponible")

    # Validate the content type up front; `content_type` can be None, which
    # previously crashed `.startswith` with an AttributeError.
    if not (audio.content_type or "").startswith("audio/"):
        raise HTTPException(status_code=400, detail="El archivo debe ser de audio")

    temp_path = None
    try:
        # The HF pipeline expects a filesystem path, so spool the upload
        # to a named temp file.
        with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as temp_file:
            content = await audio.read()
            temp_file.write(content)
            temp_path = temp_file.name

        print(f"📁 Procesando archivo: {audio.filename}, tamaño: {len(content)} bytes")

        results = classifier(temp_path)

        formatted_results = [
            {"label": result["label"], "score": round(result["score"], 4)}
            for result in results
        ]
        print(f"✅ Resultados: {formatted_results}")
        return {
            "success": True,
            "results": formatted_results,
            "filename": audio.filename,
        }
    except HTTPException:
        # Bug fix: intended 4xx/5xx errors were previously swallowed by the
        # broad handler below and re-emitted as generic 500s.
        raise
    except Exception as e:
        print(f"❌ Error: {e}")
        raise HTTPException(status_code=500, detail=f"Error procesando audio: {str(e)}")
    finally:
        # Bug fix: the temp file leaked whenever classification raised;
        # always remove it.
        if temp_path and os.path.exists(temp_path):
            os.unlink(temp_path)
def gradio_interface():
    """Build a minimal Gradio UI documenting the API plus a manual test widget."""
    with gr.Blocks(title="Musical Instrument Detection API") as demo:
        gr.Markdown("# 🎵 Musical Instrument Detection API")
        gr.Markdown("## API Endpoints:")
        gr.Markdown("""
- **GET** `/` - Información general
- **GET** `/health` - Estado del servicio
- **POST** `/detect` - Detectar instrumentos (enviar archivo audio)
### Uso desde Android:
```
POST https://tu-usuario-musical-detector-api.hf.space/detect
Content-Type: multipart/form-data
Body: audio file
```
""")

        # Simple widgets to try the classifier from the browser.
        with gr.Row():
            audio_input = gr.Audio(type="filepath", label="Probar detección")
            output_text = gr.Textbox(label="Resultado")

        def run_probe(path):
            # Mirrors the /detect endpoint but formats results as plain text.
            if not (path and classifier):
                return "No se pudo procesar el audio"
            try:
                predictions = classifier(path)
            except Exception as e:
                return f"Error: {e}"
            lines = [f"- {p['label']}: {p['score']:.4f}\n" for p in predictions]
            return "Instrumentos detectados:\n" + "".join(lines)

        audio_input.change(run_probe, inputs=[audio_input], outputs=[output_text])

    return demo
def run_fastapi():
    """Serve the FastAPI app with uvicorn on port 7860 (blocking call)."""
    uvicorn.run(app, host="0.0.0.0", port=7860)
def run_gradio():
    """Build the Gradio UI and serve it on port 7861 (blocking call)."""
    ui = gradio_interface()
    ui.launch(server_name="0.0.0.0", server_port=7861, share=False)
if __name__ == "__main__":
    # FastAPI runs on a background daemon thread (port 7860) so the
    # Gradio UI can own the main thread (port 7861).
    api_thread = Thread(target=run_fastapi, daemon=True)
    api_thread.start()
    print("🚀 FastAPI iniciado en puerto 7860")
    print("🎨 Gradio iniciado en puerto 7861")
    run_gradio()