# Practica3 / app.py
# hafsa101010's picture
# Create app.py
# 307ccfc verified
from huggingface_hub import hf_hub_download
import gradio as gr
from fastai.vision.all import *
import torch
import torchvision.transforms as transforms
import numpy as np
from PIL import Image
# Hugging Face Hub repo that hosts the trained segmentation weights.
repo_id = "hafsa101010/Practica3"
# Download the checkpoint from the Hub (cached locally between runs).
model_path = hf_hub_download(repo_id=repo_id, filename="unet.pth")
# Prefer GPU when available; fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# torch.jit.load implies "unet.pth" is a TorchScript archive, not a raw state_dict.
model = torch.jit.load(model_path, map_location=device)
model.eval()  # inference mode: freezes dropout/batchnorm behavior
def transform_image(image):
    """Preprocess a PIL image into a normalized tensor batch for the model.

    Resizes to 640x480 (PIL takes width, height), converts to a float
    tensor, applies ImageNet mean/std normalization, prepends a batch
    dimension, and moves the result to the module-level `device`.
    """
    resized = image.resize((640, 480))  # ensure the expected input size
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    batch = preprocess(resized).unsqueeze(0)
    return batch.to(device)
def predict(img):
    """Segment a PIL image and return the prediction as a grayscale mask.

    Runs the TorchScript model, takes the per-pixel argmax over class
    logits, then maps each class index to a fixed gray level:
    1 -> 255 (grape), 2 -> 150 (leaves), 3 -> 76 (pole), 4 -> 74 (pole),
    5 -> 29 (wood), 6 -> 25 (wood); all other pixels stay 0.
    """
    batch = transform_image(img)
    with torch.no_grad():
        logits = model(batch)
    class_map = torch.argmax(logits, 1).cpu().numpy().squeeze()
    # Fixed class-index -> gray-level palette for the output mask.
    gray_levels = {1: 255, 2: 150, 3: 76, 4: 74, 5: 29, 6: 25}
    mask = np.zeros_like(class_map, dtype=np.uint8)
    for cls, level in gray_levels.items():
        mask[class_map == cls] = level
    return Image.fromarray(mask)
# Build and launch the Gradio UI: a PIL image in, the grayscale mask out.
# The example files are expected to live next to this script in the Space repo.
gr.Interface(fn=predict, inputs=gr.Image(type="pil"), outputs=gr.Image(type="pil"), examples=["color_181.jpg", "color_155.jpg"]).launch()