Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -6,18 +6,14 @@ import torch
|
|
| 6 |
from torchvision import models, transforms
|
| 7 |
from PIL import Image
|
| 8 |
|
| 9 |
-
# -- get torch and cuda version
|
| 10 |
-
#TORCH_VERSION = ".".join(torch.__version__.split(".")[:2])
|
| 11 |
-
#CUDA_VERSION = torch.__version__.split("+")[-1]
|
| 12 |
|
| 13 |
-
# -- install
|
| 14 |
os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
|
| 15 |
os.system('pip install pyyaml==5.1')
|
| 16 |
|
| 17 |
import detectron2
|
| 18 |
|
| 19 |
-
from detectron2.utils.logger import setup_logger
|
| 20 |
-
# from google.colab.patches import cv2_imshow
|
| 21 |
|
| 22 |
from detectron2 import model_zoo
|
| 23 |
from detectron2.engine import DefaultPredictor
|
|
@@ -26,10 +22,10 @@ from detectron2.utils.visualizer import Visualizer
|
|
| 26 |
from detectron2.data import MetadataCatalog, DatasetCatalog
|
| 27 |
import cv2
|
| 28 |
|
| 29 |
-
|
| 30 |
setup_logger()
|
| 31 |
|
| 32 |
-
# -- load rcnn model
|
| 33 |
cfg = get_cfg()
|
| 34 |
# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
|
| 35 |
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
|
|
@@ -38,84 +34,80 @@ cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
|
|
| 38 |
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
|
| 39 |
cfg.MODEL.DEVICE= 'cpu'
|
| 40 |
predictor = DefaultPredictor(cfg)
|
| 41 |
-
'''
|
| 42 |
-
os.system(wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O input.jpg)
|
| 43 |
-
im = cv2.imread("./input.jpg")
|
| 44 |
-
cv2_imshow(im)
|
| 45 |
-
|
| 46 |
-
outputs = predictor(im)
|
| 47 |
|
| 48 |
-
|
| 49 |
-
print(outputs["instances"].pred_boxes)
|
| 50 |
-
'''
|
| 51 |
-
# -- load design modernity model for classification
# map_location='cpu' keeps the load working on CPU-only hosts even when the
# checkpoint was saved from a GPU run (the detectron2 predictor above is
# configured for CPU as well).
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted files.
DesignModernityModel = torch.load("DesignModernityModel.pt", map_location='cpu')

DesignModernityModel.eval()  # set state of the model to inference

# Class labels: model-year buckets predicted by the classifier.
LABELS = ['2000-2003', '2006-2008', '2009-2011', '2012-2014', '2015-2018']
n_labels = len(LABELS)

# Normalization statistics (standard ImageNet mean / std dev).
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]

# Image transformation pipeline: resize shorter side to 224, convert to
# tensor, then normalize with the ImageNet statistics above.
carTransforms = transforms.Compose([transforms.Resize(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=MEAN, std=STD)])
|
| 68 |
|
| 69 |
-
|
|
|
|
| 70 |
def cropImage(outputs, im, boxes, car_class_true):
    """Cut the largest detected car out of ``im`` and paste it onto a white canvas.

    Parameters
    ----------
    outputs : detectron2 prediction dict; only ``outputs["instances"].pred_masks``
        is read here.
    im : (H, W, 3) uint8 image array. NOTE(review): upstream this comes from
        cv2 (BGR order) but the crop is wrapped as an 'RGB' PIL image below —
        confirm the classifier was trained with the same channel order.
    boxes : list of (x0, y0, x1, y1) boxes for the detected cars.
    car_class_true : boolean mask selecting the COCO "car" class predictions.

    Returns
    -------
    PIL.Image.Image
        The masked car on a white background, cropped to the mask's bounding box.
    """
    # Per-instance segmentation masks for the car detections only.
    masks = list(np.array(outputs["instances"].pred_masks[car_class_true]))
    # Index of the box with the largest area -> keep only the biggest car.
    max_idx = torch.tensor([(x[2] - x[0]) * (x[3] - x[1]) for x in boxes]).argmax().item()

    # Pick the mask belonging to the largest detection.
    item_mask = masks[max_idx]

    # Tight bounding box of the mask: np.where returns (row_idx, col_idx)
    # arrays of every pixel belonging to the detected object.
    segmentation = np.where(item_mask)
    x_min = int(np.min(segmentation[1]))  # columns -> x range
    x_max = int(np.max(segmentation[1]))
    y_min = int(np.min(segmentation[0]))  # rows -> y range
    y_max = int(np.max(segmentation[0]))

    # Create cropped image from just the portion of the image we want.
    cropped = Image.fromarray(im[y_min:y_max, x_min:x_max, :], mode='RGB')
    # Turn the boolean mask into an 8-bit PIL image (255 = keep pixel).
    mask = Image.fromarray((item_mask * 255).astype('uint8'))
    # Crop the mask to match the cropped image.
    cropped_mask = mask.crop((x_min, y_min, x_max, y_max))

    # White background the same size as the crop.
    # (Fix: the original passed an RGBA 4-tuple for an RGB image; the alpha
    # component was meaningless and silently ignored.)
    height = y_max - y_min
    width = x_max - x_min
    background = Image.new(mode='RGB', size=(width, height), color=(255, 255, 255))

    # Foreground canvas with the cropped car pasted at the origin.
    new_fg_image = Image.new('RGB', background.size)
    new_fg_image.paste(cropped)

    # Alpha mask canvas with the cropped mask pasted at the origin.
    new_alpha_mask = Image.new('L', background.size, color=0)
    new_alpha_mask.paste(cropped_mask)

    # Composite the foreground over the white background through the mask.
    composite = Image.composite(new_fg_image, background, new_alpha_mask)

    return composite
|
| 104 |
|
| 105 |
-
|
| 106 |
def classifyCar(im):
|
| 107 |
-
|
| 108 |
im = cv2.imread(im)
|
| 109 |
-
|
| 110 |
-
label = "fail"
|
| 111 |
-
try:
|
| 112 |
outputs = predictor(im)
|
| 113 |
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1)
|
| 114 |
out = v.draw_instance_predictions(outputs["instances"])
|
| 115 |
-
|
| 116 |
car_class_true = outputs["instances"].pred_classes == 2
|
| 117 |
boxes = list(outputs["instances"].pred_boxes[car_class_true])
|
| 118 |
|
|
|
|
| 119 |
if len(boxes) != 0:
|
| 120 |
#max_idx = torch.tensor([(x[2] - x[0])*(x[3] - x[1]) for x in boxes]).argmax().item()
|
| 121 |
|
|
@@ -128,21 +120,14 @@ def classifyCar(im):
|
|
| 128 |
scores = torch.nn.functional.softmax(DesignModernityModel(carTransforms(im2).unsqueeze(0))[0])
|
| 129 |
label = {LABELS[i]: float(scores[i]) for i in range(n_labels)}
|
| 130 |
|
|
|
|
| 131 |
else:
|
| 132 |
im2 = Image.fromarray(np.uint8(im)).convert('RGB')
|
| 133 |
label = "No car detected"
|
| 134 |
-
except:
|
| 135 |
-
label = "fail2"
|
| 136 |
-
#im2 = carTransforms(im).unsqueeze(0) # transform and add batch dimension
|
| 137 |
-
#with torch.no_grad():
|
| 138 |
-
# scores = torch.nn.functional.softmax(DesignModernityModel(im2)[0])
|
| 139 |
-
#{LABELS[i]: float(scores[i]) for i in range(n_labels)}
|
| 140 |
-
#Image.fromarray(np.uint8(out.get_image())).convert('RGB')
|
| 141 |
-
return im2, label
|
| 142 |
|
| 143 |
-
|
| 144 |
|
| 145 |
-
# create interface for model
# Gradio UI: takes an uploaded image path, returns the processed image and the
# label output (a per-class score dict, or an error/"No car detected" string).
interface = gr.Interface(classifyCar, inputs='image', outputs=['image','label'], cache_examples=False, title='Modernity car classification')
interface.launch()
|
| 148 |
|
|
|
|
| 6 |
from torchvision import models, transforms
|
| 7 |
from PIL import Image
|
| 8 |
|
|
|
|
|
|
|
|
|
|
| 9 |
|
| 10 |
+
# -- install detectron2 from source ------------------------------------------------------------------------------
|
| 11 |
os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
|
| 12 |
os.system('pip install pyyaml==5.1')
|
| 13 |
|
| 14 |
import detectron2
|
| 15 |
|
| 16 |
+
from detectron2.utils.logger import setup_logger
|
|
|
|
| 17 |
|
| 18 |
from detectron2 import model_zoo
|
| 19 |
from detectron2.engine import DefaultPredictor
|
|
|
|
| 22 |
from detectron2.data import MetadataCatalog, DatasetCatalog
|
| 23 |
import cv2
|
| 24 |
|
| 25 |
+
|
| 26 |
setup_logger()
|
| 27 |
|
| 28 |
+
# -- load rcnn model ---------------------------------------------------------------------------------------------
|
| 29 |
cfg = get_cfg()
|
| 30 |
# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
|
| 31 |
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
|
|
|
|
| 34 |
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
|
| 35 |
cfg.MODEL.DEVICE= 'cpu'
|
| 36 |
predictor = DefaultPredictor(cfg)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
|
| 38 |
+
# -- load design modernity model for classification --------------------------------------------------------------
# map_location='cpu' keeps the load working on CPU-only hosts even when the
# checkpoint was saved from a GPU run (the detectron2 predictor above is
# configured for CPU as well).
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted files.
DesignModernityModel = torch.load("DesignModernityModel.pt", map_location='cpu')

DesignModernityModel.eval()  # set state of the model to inference

# Set class labels (model-year buckets predicted by the classifier)
LABELS = ['2000-2003', '2006-2008', '2009-2011', '2012-2014', '2015-2018']
n_labels = len(LABELS)

# define mean and std dev for normalization (standard ImageNet statistics)
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]

# define image transformation steps: resize shorter side to 224, to tensor, normalize
carTransforms = transforms.Compose([transforms.Resize(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=MEAN, std=STD)])
|
| 55 |
|
| 56 |
+
|
| 57 |
+
# -- define a function for extraction of the detected car ---------------------------------------------------------
def cropImage(outputs, im, boxes, car_class_true):
    """Cut the largest detected car out of ``im`` and paste it onto a white canvas.

    Parameters
    ----------
    outputs : detectron2 prediction dict; only ``outputs["instances"].pred_masks``
        is read here.
    im : (H, W, 3) uint8 image array. NOTE(review): upstream this comes from
        cv2 (BGR order) but the crop is wrapped as an 'RGB' PIL image below —
        confirm the classifier was trained with the same channel order.
    boxes : list of (x0, y0, x1, y1) boxes for the detected cars.
    car_class_true : boolean mask selecting the COCO "car" class predictions.

    Returns
    -------
    PIL.Image.Image
        The masked car on a white background, cropped to the mask's bounding box.
    """
    # Get the masks for the car detections only.
    masks = list(np.array(outputs["instances"].pred_masks[car_class_true]))
    # Index of the box with the largest area -> keep only the biggest car.
    max_idx = torch.tensor([(x[2] - x[0]) * (x[3] - x[1]) for x in boxes]).argmax().item()

    # Pick an item to mask.
    item_mask = masks[max_idx]

    # Get the true bounding box of the mask: np.where returns (row_idx, col_idx)
    # arrays of every pixel belonging to the detected object.
    segmentation = np.where(item_mask)
    x_min = int(np.min(segmentation[1]))  # minimum x position (columns)
    x_max = int(np.max(segmentation[1]))
    y_min = int(np.min(segmentation[0]))  # rows -> y range
    y_max = int(np.max(segmentation[0]))

    # Create cropped image from just the portion of the image we want.
    cropped = Image.fromarray(im[y_min:y_max, x_min:x_max, :], mode='RGB')
    # Create a PIL image out of the mask (255 = keep pixel).
    mask = Image.fromarray((item_mask * 255).astype('uint8'))
    # Crop the mask to match the cropped image.
    cropped_mask = mask.crop((x_min, y_min, x_max, y_max))

    # White background the same size as the crop.
    # (Fix: the original passed an RGBA 4-tuple for an RGB image; the alpha
    # component was meaningless and silently ignored.)
    height = y_max - y_min
    width = x_max - x_min
    background = Image.new(mode='RGB', size=(width, height), color=(255, 255, 255))

    # Create a new foreground image as large as the composite and paste the cropped image on top.
    new_fg_image = Image.new('RGB', background.size)
    new_fg_image.paste(cropped)

    # Create a new alpha mask as large as the composite and paste the cropped mask.
    new_alpha_mask = Image.new('L', background.size, color=0)
    new_alpha_mask.paste(cropped_mask)

    # Composite the foreground and background using the alpha mask.
    composite = Image.composite(new_fg_image, background, new_alpha_mask)

    return composite
|
| 97 |
|
| 98 |
+
# -- define function for image segmentation and classification --------------------------------------------------------
|
| 99 |
def classifyCar(im):
|
| 100 |
+
# read image
|
| 101 |
im = cv2.imread(im)
|
| 102 |
+
# perform segmentation
|
|
|
|
|
|
|
| 103 |
outputs = predictor(im)
|
| 104 |
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1)
|
| 105 |
out = v.draw_instance_predictions(outputs["instances"])
|
| 106 |
+
# check if a car was detected in the image
|
| 107 |
car_class_true = outputs["instances"].pred_classes == 2
|
| 108 |
boxes = list(outputs["instances"].pred_boxes[car_class_true])
|
| 109 |
|
| 110 |
+
# if a car was detected, extract the car and perform modernity score classification
|
| 111 |
if len(boxes) != 0:
|
| 112 |
#max_idx = torch.tensor([(x[2] - x[0])*(x[3] - x[1]) for x in boxes]).argmax().item()
|
| 113 |
|
|
|
|
| 120 |
scores = torch.nn.functional.softmax(DesignModernityModel(carTransforms(im2).unsqueeze(0))[0])
|
| 121 |
label = {LABELS[i]: float(scores[i]) for i in range(n_labels)}
|
| 122 |
|
| 123 |
+
# if no car was detected, show original image and print "No car detected"
|
| 124 |
else:
|
| 125 |
im2 = Image.fromarray(np.uint8(im)).convert('RGB')
|
| 126 |
label = "No car detected"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 127 |
|
| 128 |
+
return im2, label
|
| 129 |
|
| 130 |
+
# -- create interface for model ----------------------------------------------------------------------------------------
# Gradio UI: takes an uploaded image path, returns the processed image and the
# label output (a per-class score dict, or an error/"No car detected" string).
interface = gr.Interface(classifyCar, inputs='image', outputs=['image','label'], cache_examples=False, title='Modernity car classification')
interface.launch()
|
| 133 |
|