yrodriguezmd committed on
Commit
a4c78c5
·
1 Parent(s): 694ee3b

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -0
app.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from icevision.all import *
2
+ import icedata
3
+ import PIL, requests
4
+ import torch
5
+ from torchvision import transforms
6
+ import gradio as gr
7
# --- Dataset ---
# Download and unpack the odFridgeObjects detection dataset; icedata caches it
# under the "fridge" destination directory and returns the extracted path.
url = "https://cvbp-secondary.z19.web.core.windows.net/datasets/object_detection/odFridgeObjects.zip"
dest_dir = "fridge"
data_dir = icedata.load_data(url, dest_dir)

# --- Parser ---
# Pascal-VOC-style bounding-box annotations (one XML file per image).
parser = parsers.VOCBBoxParser(annotations_dir=data_dir / "odFridgeObjects/annotations", images_dir=data_dir / "odFridgeObjects/images")
# Parse the annotations into train/valid record lists (parser's default split).
train_records, valid_records = parser.parse()
class_map = parser.class_map  # id <-> class-name mapping, reused at inference time

# --- Model ---
extra_args = {}
model_type = models.torchvision.retinanet
backbone = model_type.backbones.resnet50_fpn
# Instantiate RetinaNet with an ImageNet-pretrained ResNet50-FPN backbone.
model = model_type.model(backbone=backbone(pretrained=True), num_classes=len(parser.class_map), **extra_args)

# --- Transforms ---
# NOTE(review): the original comment said 384 was chosen because "EfficientDet
# requires its inputs to be divisible by 128", but the model used here is
# RetinaNet; the size looks copied from an EfficientDet example. 384 still works.
image_size = 384
train_tfms = tfms.A.Adapter([*tfms.A.aug_tfms(size=image_size, presize=512), tfms.A.Normalize()])
valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(image_size), tfms.A.Normalize()])

# --- Datasets ---
train_ds = Dataset(train_records, train_tfms)
valid_ds = Dataset(valid_records, valid_tfms)

# --- Data loaders ---
train_dl = model_type.train_dl(train_ds, batch_size=8, num_workers=4, shuffle=True)
valid_dl = model_type.valid_dl(valid_ds, batch_size=8, num_workers=4, shuffle=False)

# --- Learner ---
metrics = [COCOMetric(metric_type=COCOMetricType.bbox)]
learn = model_type.fastai.learner(dls=[train_dl, valid_dl], model=model, metrics=metrics)
# Load previously trained weights via fastai's Learner.load; the 'model'
# checkpoint file must be shipped alongside this script for the app to start.
learn = learn.load('model')
35
import os

# Build the Gradio examples list from the bundled sample_images/ directory.
# `files` is pre-initialised so the comprehension below still works (instead
# of raising NameError) when the directory is missing or empty — the original
# relied on the loop variable leaking out of os.walk.
files = []
for root, dirs, files in os.walk(r'sample_images/'):
    for filename in files:
        print(filename)  # log discovered sample files at startup

# One [path] list per example, as expected by gr.Interface(examples=...).
# NOTE: as in the original, only the files of the last directory visited by
# os.walk are kept; sample_images/ is assumed to be flat.
examples = [["sample_images/" + file] for file in files]

# Footer link rendered under the interface.
article = "<p style='text-align: center'><a href='https://dicksonneoh.com/fridge-detector/' target='_blank'>Blog post</a></p>"
enable_queue = True
44
def show_preds(input_image, display_label, display_bbox, detection_threshold):
    """Run fridge-object detection on an uploaded image.

    Converts the incoming array to a PIL image, runs icevision's
    end-to-end inference with the module-level model/transforms, and
    returns the image annotated with the detections.
    """
    # A slider value of 0 would accept every prediction; fall back to 0.5.
    if detection_threshold == 0:
        detection_threshold = 0.5
    pil_img = PIL.Image.fromarray(input_image, 'RGB')
    pred_dict = model_type.end2end_detect(
        pil_img,
        valid_tfms,
        model,
        class_map=class_map,
        detection_threshold=detection_threshold,
        display_label=display_label,
        display_bbox=display_bbox,
        return_img=True,
        font_size=16,
        label_color="#FF59D6",
    )
    return pred_dict['img']
51
# --- Gradio UI ---
# Input widgets (legacy gr.inputs API, matching the gradio version this Space
# was written against).
display_chkbox_label = gr.inputs.Checkbox(label="Label", default=True)
display_chkbox_box = gr.inputs.Checkbox(label="Box", default=True)
detection_threshold_slider = gr.inputs.Slider(minimum=0, maximum=1, step=0.1, default=0.5, label="Detection Threshold")
outputs = gr.outputs.Image(type="pil")

# Wire the prediction function to the UI. The first input ("image") is the
# uploaded/selected picture; the rest control how detections are drawn.
gr_interface = gr.Interface(fn=show_preds, inputs=["image", display_chkbox_label, display_chkbox_box, detection_threshold_slider], outputs=outputs, title='IceApp - Fridge Object', article=article, examples=examples)

# Fix: the interface was built but never served — without launch() the app
# does nothing. Honour the enable_queue flag defined earlier in the script.
gr_interface.launch(enable_queue=enable_queue)