PablitoGil14 committed
Commit 31949e2 · verified · 1 Parent(s): 5e386d4

Update app.py

Files changed (1)
  1. app.py +72 -46
app.py CHANGED
@@ -1,29 +1,37 @@
+from huggingface_hub import from_pretrained_fastai
 import gradio as gr
 from fastai.vision.all import *
-import numpy as np
-from PIL import Image
-from huggingface_hub import hf_hub_download
 import torchvision.transforms as transforms
-import torch
-from albumentations import (
-    Compose,
-    HorizontalFlip,
-    Rotate,
-    GridDistortion
-)
+import torchvision.transforms as transforms
+from fastai.basics import *
+from fastai.vision import models
+from fastai.vision.all import *
+from fastai.metrics import *
+from fastai.data.all import *
+from fastai.callback import *
 from pathlib import Path
+import random
+import PIL
 
-# --- FUNCTIONS AND CLASSES REQUIRED BY THE PICKLE ---
-
-def get_y_fn(x):
-    return Path(str(x).replace("Images","Labels").replace("color","gt").replace(".jpg",".png"))
+# First we define all the functions, classes and variables needed for this to work
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+def transform_image(image):
+    my_transforms = transforms.Compose([transforms.ToTensor(),
+                                        transforms.Normalize(
+                                            [0.485, 0.456, 0.406],
+                                            [0.229, 0.224, 0.225])])
+    image_aux = image
+    return my_transforms(image_aux).unsqueeze(0).to(device)
 
 class TargetMaskConvertTransform(ItemTransform):
     def __init__(self):
         pass
     def encodes(self, x):
         img,mask = x
+
+        # Convert to array
         mask = np.array(mask)
+
         mask[(mask!=255) & (mask!=150) & (mask!=76) & (mask!=74) & (mask!=29) & (mask!=25)]=0
         mask[mask==255]=1
         mask[mask==150]=2
@@ -31,50 +39,68 @@ class TargetMaskConvertTransform(ItemTransform):
         mask[mask==74]=4
         mask[mask==29]=3
         mask[mask==25]=3
-        return img, PILMask.create(mask)
+
+        # Back to PILMask
+        mask = PILMask.create(mask)
+        return img, mask
+
+from albumentations import (
+    Compose,
+    OneOf,
+    ElasticTransform,
+    GridDistortion,
+    OpticalDistortion,
+    HorizontalFlip,
+    Rotate,
+    Transpose,
+    CLAHE,
+    ShiftScaleRotate
+)
+
+def get_y_fn(x):
+    return Path(str(x).replace("Images","Labels").replace("color","gt").replace(".jpg",".png"))
 
 class SegmentationAlbumentationsTransform(ItemTransform):
     split_idx = 0
+
     def __init__(self, aug):
         self.aug = aug
+
     def encodes(self, x):
         img,mask = x
         aug = self.aug(image=np.array(img), mask=np.array(mask))
         return PILImage.create(aug["image"]), PILMask.create(aug["mask"])
 
-# --- LOAD MODEL ---
-
-model_path = hf_hub_download(repo_id="PablitoGil14/AP-Practica3", filename="export.pkl")
-learn = load_learner(model_path)
+# Load the model
+repo_id = "luisvarona/Practica3"
+learn = from_pretrained_fastai(repo_id)
+model = learn.model
+model = model.cpu()
 
-# --- PREDICTION FUNCTION ---
-
-def segmentar(img: Image.Image):
-    img = img.resize((640, 480))
-    x = transforms.Compose([
-        transforms.ToTensor(),
-        transforms.Normalize(*imagenet_stats)
-    ])(img).unsqueeze(0)
-
+
+# Define a function that carries out the predictions
+def predict(img_ruta):
+    # img = PIL.Image.open(img_ruta)  # use this if the input parameter is a path to an image
+    # img = img_ruta                  # use this if the input parameter is already an image
+    img = PIL.Image.fromarray(img_ruta)
+    image = transforms.Resize((480,640))(img)
+    tensor = transform_image(image=image)
+    model.to(device)
     with torch.no_grad():
-        preds = learn.model.eval()(x)
-        mask = torch.argmax(preds, dim=1).squeeze().cpu().numpy()
-
-    out_mask = np.zeros_like(mask, dtype=np.uint8)
-    out_mask[mask == 1] = 255
-    out_mask[mask == 2] = 150
-    out_mask[mask == 3] = 29
-    out_mask[mask == 4] = 74
-    return Image.fromarray(out_mask)
-
-# --- GRADIO INTERFACE ---
+        outputs = model(tensor)
+
+    outputs = torch.argmax(outputs,1)
+    mask = np.array(outputs.cpu())
+    mask[mask==1]=255
+    mask[mask==2]=150
+    mask[mask==3]=29
+    mask[mask==4]=74
+    mask = np.reshape(mask,(480,640))
+    return Image.fromarray(mask.astype('uint8'))
 
-demo = gr.Interface(
-    fn=segmentar,
-    inputs=gr.Image(type="pil"),
-    outputs=gr.Image(type="pil"),
-    title="Vineyard Segmenter",
-    description="Upload an image and the model will segment grape bunches, leaves, wood and posts."
-)
+
+# img = PILImage.create(img)  # we may need to use this instead of PIL.Image.open
+
+# Create the interface and launch it
+gr.Interface(fn=predict, inputs=gr.inputs.Image(shape=(480, 640)), outputs=gr.inputs.Image(shape=(480, 640)), examples=['color_61.jpg','color_62.jpg']).launch(share=False)
 
-demo.launch()
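The mask handling above round-trips between the grayscale values stored in the label PNGs and the class indices the model is trained on: TargetMaskConvertTransform.encodes collapses {255, 150, 76, 74, 29, 25} to indices, and predict maps the indices back to grayscale for display. A minimal sketch of the part of that mapping visible in this diff (the value 76 is also kept by the filter, but its target index sits on an unchanged line outside these hunks):

```python
import numpy as np

# Grayscale label value -> class index, as visible in the diff above.
# 29 and 25 are merged into class 3; 76 is also preserved by the filter,
# but its target index is on a context line not shown here.
GRAY_TO_CLASS = {255: 1, 150: 2, 74: 4, 29: 3, 25: 3}
CLASS_TO_GRAY = {1: 255, 2: 150, 3: 29, 4: 74}  # inverse mapping used by predict()

def encode_mask(gray: np.ndarray) -> np.ndarray:
    """Collapse raw grayscale labels to class indices; everything else is background (0)."""
    out = np.zeros_like(gray, dtype=np.uint8)
    for value, cls in GRAY_TO_CLASS.items():
        out[gray == value] = cls
    return out
```

An explicit table like this also sidesteps the ordering hazard of chained in-place `mask[mask==...] = ...` assignments, which only works in the app because the source values and target indices never overlap.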
 
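Since the Gradio image input passes a NumPy array by default (which is why predict calls PIL.Image.fromarray), the new predict can be smoke-tested outside Gradio with one of the bundled example images. A minimal sketch, assuming color_61.jpg sits next to app.py:

```python
import numpy as np
import PIL

# predict() expects a NumPy array, i.e. what the Gradio Image input passes by default
img = np.array(PIL.Image.open("color_61.jpg").convert("RGB"))
mask = predict(img)  # 480x640 PIL image with values in {0, 29, 74, 150, 255}
mask.save("color_61_mask.png")
print(np.unique(np.array(mask)))  # should be a subset of the values above
```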
 
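One caveat: the launch line uses the legacy gr.inputs namespace (and an input component in the output slot), which was deprecated in Gradio 3 and removed in Gradio 4. On a current Gradio, a roughly equivalent interface would look like the sketch below; this is an assumed modernization, not the committed code, and the shape argument is dropped because predict already resizes to 480x640 internally:

```python
import gradio as gr

# Rough Gradio >= 4 equivalent of the launch line above (a sketch, not the committed code)
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="numpy"),   # passes a NumPy array, matching PIL.Image.fromarray(img_ruta)
    outputs=gr.Image(type="pil"),    # predict() returns a PIL image
    examples=["color_61.jpg", "color_62.jpg"],
)
demo.launch(share=False)
```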