File size: 2,749 Bytes
cc21ec4
8edc68d
 
 
 
cc21ec4
1fa094a
 
 
 
 
cc21ec4
8edc68d
 
 
 
 
 
 
 
200538d
8edc68d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cc21ec4
8edc68d
 
 
 
 
 
 
 
 
 
 
 
cc21ec4
8edc68d
 
 
 
 
 
 
 
 
 
 
 
 
 
7b3d2d1
8edc68d
 
 
 
f89f28d
8edc68d
f89f28d
8edc68d
7b3d2d1
8edc68d
 
 
671a681
 
f89f28d
 
1fa094a
 
 
4a5ed93
1fa094a
 
580b414
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import gradio as gr
from huggingface_hub import hf_hub_download
import time, traceback, os
import torch
import numpy as np
from fastai.vision.all import *
import pathlib

# Compatibility patch: the learner was pickled on Windows, so unpickling it
# on a POSIX host (e.g. a Hugging Face Space) would fail trying to build
# WindowsPath objects. Aliasing WindowsPath to PosixPath lets the load succeed.
if os.name == 'posix':
    pathlib.WindowsPath = pathlib.PosixPath

# Pin OpenMP/PyTorch to a single thread.
# NOTE(review): presumably targets a small CPU-only host where the default
# thread count oversubscribes — confirm before changing.
os.environ["OMP_NUM_THREADS"] = "1"
torch.set_num_threads(1)

def log(msg):
    """Print *msg* to stdout prefixed with an HH:MM:SS debug timestamp, flushed immediately."""
    timestamp = time.strftime('%H:%M:%S')
    print(f"[DEBUG {timestamp}] {msg}", flush=True)


# --- MODEL LOADING ---
# Hugging Face Hub coordinates of the exported fastai learner.
repo_id = "daniihc16/chest-xray-classifier"
filename = "model.pkl"

try:
    log("Descargando...")
    # Downloads the pickled learner (or reuses the local HF cache copy).
    model_path = hf_hub_download(repo_id=repo_id, filename=filename)
    
    log("Cargando estructura...")
    # load_learner rebuilds the full fastai Learner from the pickle.
    learn = load_learner(model_path)
    model = learn.model.eval() # Extract the raw PyTorch module in eval mode
    vocab = learn.dls.vocab    # Class labels, in the model's output order
    
    log(f"Modelo extraído. Vocabulario: {vocab}")
    
except Exception as e:
    # Fatal: the app is useless without the model, so log and re-raise.
    log(f"ERROR FATAL: {e}")
    raise e

# --- MANUAL PREDICTION (NO DATALOADERS) ---
def predict_pure_pytorch(img):
    """Classify a chest X-ray using the raw PyTorch model (no fastai DataLoaders).

    Args:
        img: PIL.Image supplied by Gradio (any mode or size).

    Returns:
        dict mapping each class label in ``vocab`` to its softmax probability,
        or a single-entry ``{"Error: ...": 0.0}`` dict if anything fails.
    """
    start = time.time()
    log("1. Petición recibida")

    try:
        # 1. MANUAL PREPROCESSING

        # BUG FIX: X-ray uploads are frequently grayscale ("L") or RGBA;
        # np.array() would then yield 2-D or 4-channel data and the
        # permute / 3-channel normalization below would fail. Force RGB.
        if img.mode != "RGB":
            img = img.convert("RGB")

        if img.size != (224, 224):
            img = img.resize((224, 224))

        x = torch.tensor(np.array(img)).float()
        x = x.permute(2, 0, 1)  # HWC -> CHW

        # ImageNet normalization stats (standard fastai/torchvision values).
        x /= 255.0
        mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
        std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
        x = (x - mean) / std

        x = x.unsqueeze(0)  # add batch dimension -> (1, 3, 224, 224)

        log(f"   Tensor preparado: {x.shape}")

        # 2. INFERENCE
        log("2. Ejecutando forward pass...")
        with torch.no_grad():
            out = model(x)                     # direct forward pass, no wrappers
            probs = torch.softmax(out, dim=1)  # logits -> probabilities

        probs_np = probs[0].numpy()
        log(f"3. Probabilidades crudas: {probs_np}")

        # 3. RESULT: label -> probability, the shape gr.Label expects.
        result = {vocab[i]: float(probs_np[i]) for i in range(len(vocab))}
        log(f"   Tiempo: {time.time()-start:.3f}s")

        return result

    except Exception as e:
        # Best-effort error surface: show the failure in the UI instead of 500ing.
        log("ERROR EN PREDICCIÓN MANUAL")
        traceback.print_exc()
        return {f"Error: {str(e)}": 0.0}

# --- INTERFACE ---
# Gradio UI: image in, label/probability distribution out.
# NOTE(review): the example image files must sit next to the app for the
# examples gallery to load — confirm they are committed to the Space.
interface = gr.Interface(
    fn=predict_pure_pytorch,
    inputs=gr.Image(type="pil"),
    outputs=gr.Label(),
    title="Clasificador Radiografías",
    description="",
    examples=["IM-0115-0001.jpeg", "person1_bacteria_1.jpeg"]

)

# Alias so Hugging Face auto-detects the app when launched via 'gradio app.py'.
demo = interface

if __name__ == "__main__":
    # Plain launch (no custom host/port) to avoid port/server conflicts.
    interface.launch()