PablitoGil14 commited on
Commit
441c086
·
verified ·
1 Parent(s): f6bac2d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +82 -52
app.py CHANGED
@@ -1,75 +1,105 @@
1
  from huggingface_hub import from_pretrained_fastai
2
- from fastai.vision.all import *
3
  import gradio as gr
4
- import numpy as np
5
- from PIL import Image
6
  import torchvision.transforms as transforms
7
- from albumentations import (
8
- Compose, GridDistortion, HorizontalFlip, Rotate
9
- )
 
 
 
 
10
  from pathlib import Path
11
- import torch
 
12
 
 
13
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
14
-
15
  def transform_image(image):
16
- preprocess = transforms.Compose([
17
- transforms.ToTensor(),
18
- transforms.Normalize([0.485, 0.456, 0.406],
19
- [0.229, 0.224, 0.225])
20
- ])
21
- return preprocess(image).unsqueeze(0).to(device)
22
-
23
- def get_y_fn(x):
24
- return Path(str(x).replace("Images", "Labels").replace("color", "gt").replace(".jpg", ".png"))
25
 
26
  class TargetMaskConvertTransform(ItemTransform):
 
 
27
  def encodes(self, x):
28
- img, mask = x
 
 
29
  mask = np.array(mask)
30
- mask[(mask!=255) & (mask!=150) & (mask!=76) & (mask!=74) & (mask!=29) & (mask!=25)] = 0
31
- mask[mask==255] = 1
32
- mask[mask==150] = 2
33
- mask[(mask==76) | (mask==74)] = 4
34
- mask[(mask==29) | (mask==25)] = 3
35
- return img, PILMask.create(mask)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
  class SegmentationAlbumentationsTransform(ItemTransform):
38
  split_idx = 0
39
- def __init__(self, aug): self.aug = aug
 
 
 
40
  def encodes(self, x):
41
- img, mask = x
42
  aug = self.aug(image=np.array(img), mask=np.array(mask))
43
  return PILImage.create(aug["image"]), PILMask.create(aug["mask"])
44
 
45
- repo_id = "PablitoGil14/AP-Practica3"
 
46
  learn = from_pretrained_fastai(repo_id)
47
- model = learn.model.cpu()
 
48
 
49
- def predict(img_input):
50
- image = Image.fromarray(img_input).resize((640, 480))
51
- tensor = transform_image(image)
 
 
 
 
 
52
  model.to(device)
53
  with torch.no_grad():
54
- output = model(tensor)
55
- pred = torch.argmax(output, dim=1).squeeze().cpu().numpy()
56
-
57
- # Recolorear la máscara
58
- colored = np.zeros_like(pred, dtype=np.uint8)
59
- colored[pred == 1] = 255
60
- colored[pred == 2] = 150
61
- colored[pred == 3] = 29
62
- colored[pred == 4] = 74
63
- return Image.fromarray(colored)
64
-
65
- demo = gr.Interface(
66
- fn=predict,
67
- inputs=gr.Image(type="numpy", label="Sube una imagen", shape=(480, 640)),
68
- outputs=gr.Image(type="pil", label="Máscara generada"),
69
- examples=["color_161.jpg", "color_162.jpg"],
70
- title="Segmentador de Viñedos",
71
- description="Sube una imagen y este modelo segmentará racimos de uva, hojas, madera y postes."
72
- )
73
-
74
- demo.launch()
75
 
 
 
 
 
 
 
1
  from huggingface_hub import from_pretrained_fastai
 
2
  import gradio as gr
3
+ from fastai.vision.all import *
 
4
  import torchvision.transforms as transforms
5
+ import torchvision.transforms as transforms
6
+ from fastai.basics import *
7
+ from fastai.vision import models
8
+ from fastai.vision.all import *
9
+ from fastai.metrics import *
10
+ from fastai.data.all import *
11
+ from fastai.callback import *
12
  from pathlib import Path
13
+ import random
14
+ import PIL
15
 
16
# First define the functions, classes and variables needed for inference.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def transform_image(image):
    """Convert a PIL image into a normalised (1, C, H, W) float tensor on *device*.

    Applies ToTensor followed by the ImageNet mean/std normalisation —
    presumably the same statistics the model was trained with (TODO confirm).

    Parameters
    ----------
    image : PIL.Image.Image
        Input image, already resized by the caller.

    Returns
    -------
    torch.Tensor
        Batched tensor of shape (1, C, H, W) moved to *device*.
    """
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])
    # The original kept a needless `image_aux = image` alias; removed.
    return preprocess(image).unsqueeze(0).to(device)
 
 
 
25
 
26
class TargetMaskConvertTransform(ItemTransform):
    """Remap raw grayscale label values of a mask to contiguous class ids.

    Mapping: 255 -> 1, 150 -> 2, 76/74 -> 4, 29/25 -> 3; any other raw
    value is treated as background (0).
    """

    # Grayscale value in the ground-truth mask -> class index.
    _VALUE_TO_CLASS = {255: 1, 150: 2, 76: 4, 74: 4, 29: 3, 25: 3}

    def __init__(self):
        pass

    def encodes(self, x):
        img, mask = x
        raw = np.array(mask)
        # Start from an all-background array and paint each known class in.
        remapped = np.zeros_like(raw)
        for value, class_id in self._VALUE_TO_CLASS.items():
            remapped[raw == value] = class_id
        return img, PILMask.create(remapped)
46
+
47
+ from albumentations import (
48
+ Compose,
49
+ OneOf,
50
+ ElasticTransform,
51
+ GridDistortion,
52
+ OpticalDistortion,
53
+ HorizontalFlip,
54
+ Rotate,
55
+ Transpose,
56
+ CLAHE,
57
+ ShiftScaleRotate
58
+ )
59
+
60
def get_y_fn(x):
    """Map an image path to its ground-truth mask path.

    ``.../Images/color_XXX.jpg`` becomes ``.../Labels/gt_XXX.png``.
    """
    label_path = str(x)
    for old, new in (("Images", "Labels"), ("color", "gt"), (".jpg", ".png")):
        label_path = label_path.replace(old, new)
    return Path(label_path)
62
 
63
class SegmentationAlbumentationsTransform(ItemTransform):
    """Apply an albumentations augmentation jointly to an image and its mask.

    ``split_idx = 0`` restricts the transform to the training split.
    """

    split_idx = 0

    def __init__(self, aug):
        self.aug = aug

    def encodes(self, x):
        image, mask = x
        augmented = self.aug(image=np.array(image), mask=np.array(mask))
        return PILImage.create(augmented["image"]), PILMask.create(augmented["mask"])
73
 
74
# Load the pretrained fastai Learner from the Hugging Face Hub.
# NOTE(review): the repo id points at a different user's repository than the
# committer's — confirm this is the intended model.
repo_id = "luisvarona/Practica3"
learn = from_pretrained_fastai(repo_id)
# Keep only the underlying torch module; it stays on CPU here and is moved
# to the selected device inside predict().
model = learn.model
model = model.cpu()
 
80
+
81
# Run the segmentation model on a single input image.
def predict(img_ruta):
    """Segment an image and return the predicted mask as a PIL image.

    Parameters
    ----------
    img_ruta : numpy.ndarray
        Input image as an (H, W, 3) uint8 array (Gradio numpy input).

    Returns
    -------
    PIL.Image.Image
        A 480x640 grayscale mask whose pixel values encode the classes
        (255, 150, 29, 74; 0 = background).
    """
    img = PIL.Image.fromarray(img_ruta)
    # The model expects 480x640 inputs.
    image = transforms.Resize((480, 640))(img)
    tensor = transform_image(image=image)
    model.to(device)
    with torch.no_grad():
        outputs = model(tensor)
    # Per-pixel class id: argmax over the class dimension.
    pred = torch.argmax(outputs, 1)
    mask = np.array(pred.cpu())
    # Map class ids back to the grayscale values used by the dataset labels.
    # Safe to do in place: remapped values never collide with remaining ids.
    mask[mask == 1] = 255
    mask[mask == 2] = 150
    mask[mask == 3] = 29
    mask[mask == 4] = 74
    mask = np.reshape(mask, (480, 640))
    return Image.fromarray(mask.astype('uint8'))
 
 
 
 
 
 
 
 
 
 
 
100
 
101
+
102
# Build and launch the Gradio interface.
gr.Interface(
    fn=predict,
    inputs=gr.inputs.Image(shape=(480, 640)),
    # BUG FIX: the output component must come from gr.outputs, not gr.inputs;
    # predict() returns a PIL image, so declare the output type explicitly.
    outputs=gr.outputs.Image(type="pil"),
    examples=['color_161.jpg', 'color_162.jpg'],
).launch(share=False)