# Practica1 / app.py — chest X-ray classifier Gradio Space (daniihc16)
import gradio as gr
from huggingface_hub import hf_hub_download
import time, traceback, os
import torch
import numpy as np
from fastai.vision.all import *
import pathlib
# Patch for Windows -> Linux compatibility in fastai: learners exported on
# Windows pickle WindowsPath objects, which cannot be unpickled on POSIX
# unless WindowsPath is aliased to PosixPath first.
if os.name == 'posix':
    pathlib.WindowsPath = pathlib.PosixPath

# Restrict OpenMP and PyTorch to a single thread — avoids CPU oversubscription
# on small shared hosts (e.g. free Hugging Face Spaces).
# NOTE(review): OMP_NUM_THREADS is set after `import torch`; some OpenMP
# runtimes read it only at load time — confirm it still takes effect here.
os.environ["OMP_NUM_THREADS"] = "1"
torch.set_num_threads(1)
def log(msg):
    """Print *msg* to stdout with a timestamped debug prefix, flushed immediately."""
    stamp = time.strftime('%H:%M:%S')
    print("[DEBUG %s] %s" % (stamp, msg), flush=True)
# --- MODEL LOADING ---
# Download the exported fastai learner from the Hugging Face Hub once at
# startup, then unwrap the raw PyTorch module so inference can bypass
# fastai's DataLoader machinery entirely.
repo_id = "daniihc16/chest-xray-classifier"
filename = "model.pkl"

try:
    log("Descargando...")
    model_path = hf_hub_download(repo_id=repo_id, filename=filename)
    log("Cargando estructura...")
    learn = load_learner(model_path)
    model = learn.model.eval()  # raw PyTorch module, switched to eval mode
    vocab = learn.dls.vocab     # class labels, index-aligned with model outputs
    log(f"Modelo extraído. Vocabulario: {vocab}")
except Exception as e:
    log(f"ERROR FATAL: {e}")
    # Bare `raise` re-raises the active exception with its original traceback
    # intact (fix: was `raise e`, which rebinds/extends the traceback).
    raise
# --- MANUAL PREDICTION (NO DATALOADERS) ---
def predict_pure_pytorch(img):
    """Classify a chest X-ray image with the raw PyTorch model.

    Preprocesses manually (RGB conversion, 224x224 resize, ImageNet
    normalisation) and runs a direct forward pass, avoiding fastai's
    DataLoader wrappers.

    Args:
        img: PIL.Image supplied by Gradio (``type="pil"``).

    Returns:
        dict mapping each label in ``vocab`` to its softmax probability;
        on failure, a single-entry dict keyed by the error message.
    """
    start = time.time()
    log("1. Petición recibida")
    try:
        # 1. MANUAL PREPROCESSING
        # Fix: force 3 channels first. Chest X-rays are frequently
        # grayscale ("L" mode), and RGBA uploads are possible; both would
        # break the HWC->CHW permute / 3-channel normalisation below.
        if img.mode != "RGB":
            img = img.convert("RGB")
        if img.size != (224, 224):
            img = img.resize((224, 224))
        x = torch.tensor(np.array(img)).float()
        x = x.permute(2, 0, 1)  # HWC -> CHW
        x /= 255.0
        # ImageNet statistics — presumably what the pretrained backbone was
        # normalised with; TODO confirm against the training transforms.
        mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
        std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
        x = (x - mean) / std
        x = x.unsqueeze(0)  # add batch dimension -> (1, 3, 224, 224)
        log(f" Tensor preparado: {x.shape}")

        # 2. INFERENCE
        log("2. Ejecutando forward pass...")
        with torch.no_grad():
            out = model(x)  # direct forward pass, no wrappers
        probs = torch.softmax(out, dim=1)  # logits -> probabilities
        probs_np = probs[0].numpy()
        log(f"3. Probabilidades crudas: {probs_np}")

        # 3. RESULT — label -> probability, as expected by gr.Label
        result = {vocab[i]: float(probs_np[i]) for i in range(len(vocab))}
        log(f" Tiempo: {time.time()-start:.3f}s")
        return result
    except Exception as e:
        log("ERROR EN PREDICCIÓN MANUAL")
        traceback.print_exc()
        # Surface the error in the Gradio Label output instead of crashing.
        return {f"Error: {str(e)}": 0.0}
# --- INTERFACE ---
interface = gr.Interface(
    fn=predict_pure_pytorch,
    inputs=gr.Image(type="pil"),
    outputs=gr.Label(),
    title="Clasificador Radiografías",
    description="",
    # Example images assumed to sit next to app.py in the Space repo —
    # TODO confirm both files exist, otherwise Gradio errors at startup.
    examples=["IM-0115-0001.jpeg", "person1_bacteria_1.jpeg"]
)

# Alias so Hugging Face detects the app when launched via `gradio app.py`
demo = interface

if __name__ == "__main__":
    # Plain launch (no custom port/server args) to avoid port/server conflicts
    interface.launch()