from huggingface_hub import from_pretrained_fastai
from fastai.vision.all import *
import gradio as gr

import torchvision.transforms as transforms
import PIL
from pathlib import Path
|
|
def get_y_fn(x):
    # Map an image path to its ground-truth mask path:
    # ".../Images/color_XXX.jpg" -> ".../Labels/gt_XXX.png"
    return Path(str(x).replace("Images", "Labels").replace("color", "gt").replace(".jpg", ".png"))
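
# This label function and the two ItemTransforms below presumably mirror the
# training-time definitions; they have to be importable for
# from_pretrained_fastai to unpickle the exported Learner further down.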
|
|
class SegmentationAlbumentationsTransform(ItemTransform):
    # split_idx = 0 restricts the augmentation to the training split
    split_idx = 0

    def __init__(self, aug):
        self.aug = aug

    def encodes(self, x):
        img, mask = x
        # Apply the same albumentations pipeline to the image and its mask
        aug = self.aug(image=np.array(img), mask=np.array(mask))
        return PILImage.create(aug["image"]), PILMask.create(aug["mask"])
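
# A sketch of how this transform would have been attached at training time
# (hypothetical; the real augmentation pipeline is not part of this script):
#   import albumentations as A
#   tfm = SegmentationAlbumentationsTransform(A.Compose([A.HorizontalFlip(p=0.5)]))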
|
|
class TargetMaskConvertTransform(ItemTransform):
    def __init__(self):
        pass

    def encodes(self, x):
        img, mask = x

        mask = np.array(mask)

        # Zero out every pixel whose grayscale value is not one of the known
        # label values. (The original list(map(np.all, zip(...))) construct
        # compared whole rows at once; np.isin does the intended per-pixel test.)
        mask[~np.isin(mask, [255, 150, 76, 74, 29, 25])] = 0

        # Collapse the grayscale label values into class indices
        mask[mask == 255] = 1
        mask[mask == 150] = 2
        mask[mask == 76] = 3
        mask[mask == 74] = 3
        mask[mask == 29] = 4
        mask[mask == 25] = 4

        mask = PILMask.create(mask)
        return img, mask
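
# Together the two steps leave 5 classes (0 = background plus indices 1-4),
# matching the argmax and colour lookup in segmenta() below.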
|
|
repo_id = "pamunarr/segmentacion_uvas"
learn = from_pretrained_fastai(repo_id)
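# from_pretrained_fastai downloads the exported Learner from the Hub and
# unpickles it, which is why the custom symbols above are defined here.
# learn.predict() would work directly; the script instead traces the bare
# PyTorch module below for lighter-weight inference.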
|
|
# Pull the underlying PyTorch module out of the Learner and keep it on the CPU
aux = learn.model.cpu()
|
|
# Build one example input with the expected preprocessing (480x640 resize,
# ImageNet normalisation) to trace the model with
img = PILImage.create('color_206.jpg')
transformer = transforms.Compose([
    transforms.Resize((480, 640)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],
                         [0.229, 0.224, 0.225]),
])
img = transformer(img).unsqueeze(0).cpu()
|
|
model = torch.jit.trace(aux, img)
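# Tracing records the operations run for this one example, so the traced
# module is effectively specialised to 480x640 inputs; segmenta() below
# resizes every incoming image to that shape before calling it.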
|
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.eval()
model.to(device)
|
|
def transform_image(image):
    # Same ImageNet normalisation that was used when tracing the model
    my_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])
    return my_transforms(image).unsqueeze(0).to(device)
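
# Note: unlike the tracing pipeline above, transform_image does not resize;
# segmenta() performs the 480x640 resize before calling it.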
|
|
def segmenta(img):
    # Gradio hands the input over as a numpy array; convert it and resize to
    # the 480x640 shape the traced model expects
    img = PIL.Image.fromarray(img)
    image = transforms.Resize((480, 640))(img)
    tensor = transform_image(image=image)

    with torch.no_grad():
        outputs = model(tensor)

    # Per-pixel class index: argmax over the channel dimension
    outputs = torch.argmax(outputs, 1)

    mask = np.array(outputs.cpu())
    mask = np.reshape(mask, (480, 640))

    # Paint each class with a fixed colour: 1 = white, 2 = green, 3 = red,
    # 4 = blue; class 0 (background) stays black
    img_mask = np.zeros((480, 640, 3), dtype=np.uint8)
    img_mask[mask == 1, :] = (255, 255, 255)
    img_mask[mask == 2, 1] = 255
    img_mask[mask == 3, 0] = 255
    img_mask[mask == 4, 2] = 255

    return PIL.Image.fromarray(img_mask)
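
# Quick offline check (a sketch; assumes color_206.jpg sits next to this script):
#   preview = segmenta(np.array(PIL.Image.open('color_206.jpg')))
#   preview.save('mask_206.png')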
|
|
gr.Interface(fn=segmenta, inputs="image", outputs="image",
             examples=['color_206.jpg', 'color_155.jpg']).launch(share=False)
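# With inputs="image", gradio passes the upload as a numpy array (hence the
# PIL.Image.fromarray conversion in segmenta); share=False keeps the demo
# local rather than creating a public gradio.live link.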