mangaruu committed on
Commit
a02e08d
·
1 Parent(s): c1355e8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -49
app.py CHANGED
@@ -5,70 +5,91 @@ except:
5
  os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
6
 
7
  import cv2
8
-
9
- from matplotlib.pyplot import axis
10
  import gradio as gr
11
  import requests
12
  import numpy as np
13
- from torch import nn
14
- import requests
15
-
16
- import torch
17
-
18
- from detectron2 import model_zoo
19
  from detectron2.engine import DefaultPredictor
20
  from detectron2.config import get_cfg
21
  from detectron2.utils.visualizer import Visualizer
22
  from detectron2.data import MetadataCatalog
23
 
24
- url1 = 'https://images.unsplash.com/photo-1503463168353-9d883c7f5255?q=80&w=1748&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D'
25
- r = requests.get(url1, allow_redirects=True)
26
- open("balloon.jpg", 'wb').write(r.content)
27
- url2 = 'https://images.unsplash.com/photo-1599158150601-1417ebbaafdd?q=80&w=1636&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D'
28
- r = requests.get(url2, allow_redirects=True)
29
- open("football.jpg", 'wb').write(r.content)
30
-
31
- model_name='COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml'
32
-
33
- # model = model_zoo.get(model_name, trained=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
- cfg = get_cfg()
36
- # add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
37
- cfg.merge_from_file(model_zoo.get_config_file(model_name))
38
- cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
39
- # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as w ell
40
- cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_name)
41
 
42
- if not torch.cuda.is_available():
43
- cfg.MODEL.DEVICE='cpu'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
 
45
- predictor = DefaultPredictor(cfg)
46
 
 
47
 
48
- def inference(image):
49
- img = np.array(image.resize((1024,1024)))
50
- outputs = predictor(img)
51
 
52
- v = Visualizer(img, MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
53
- out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
54
 
55
- return out.get_image()
56
-
57
 
 
58
 
59
- title = "Detectron2-MaskRCNN X101"
60
- description = "demo for Detectron2. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below.\
61
- </br><b>Model: COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml</b>"
62
- article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2012.07177'>Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation</a> | <a href='https://github.com/facebookresearch/detectron2/blob/main/MODEL_ZOO.md'>Detectron model ZOO</a></p>"
63
 
64
- gr.Interface(
65
- inference,
66
- [gr.inputs.Image(type="pil", label="Input")],
67
- gr.outputs.Image(type="numpy", label="Output"),
68
- title=title,
69
- description=description,
70
- article=article,
71
- examples=[
72
- ["balloon.jpg"],
73
- ["football.jpg"]
74
- ]).launch()
 
5
  os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
6
 
7
import cv2
import gradio as gr
import numpy as np
import requests
import torch

from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer
15
 
16
+ # Predefined Detectron2 models
17
+ models = [
18
+ {
19
+ "name": "Instance Segmentation",
20
+ "config_file": "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml",
21
+ },
22
+ {
23
+ "name": "Panoptic Segmentation",
24
+ "config_file": "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml",
25
+ },
26
+ ]
27
+
28
def setup_model(config_file):
    """Build a Detectron2 config from a model-zoo YAML and attach weights.

    Args:
        config_file: model-zoo-relative path of the YAML config.

    Returns:
        A CfgNode ready to hand to ``DefaultPredictor``.
    """
    config = get_cfg()
    config.merge_from_file(model_zoo.get_config_file(config_file))
    config.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_file)
    # DefaultPredictor assumes CUDA by default; fall back to CPU-only hosts.
    if not torch.cuda.is_available():
        config.MODEL.DEVICE = "cpu"
    return config
35
+
36
# Pre-build each model's config and dataset metadata once at startup so
# `inference` only has to instantiate a predictor per request.
for entry in models:
    cfg = setup_model(entry["config_file"])
    entry["cfg"] = cfg
    entry["metadata"] = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
39
+
40
def inference(image_url, image, min_score, model_name):
    """Run the selected Detectron2 model on an image and draw predictions.

    Args:
        image_url: optional URL to fetch the image from (takes priority).
        image: optional RGB numpy array from the Gradio upload widget.
        min_score: confidence threshold for kept instances.
        model_name: display name of an entry in ``models``.

    Returns:
        RGB numpy image with instance predictions drawn on it.

    Raises:
        ValueError: if ``model_name`` is unknown or no usable image is given.
    """
    model = next((m for m in models if m["name"] == model_name), None)
    if not model:
        raise ValueError("Model not found")

    im = None
    if image_url:
        r = requests.get(image_url)
        if r:
            buf = np.frombuffer(r.content, dtype="uint8")
            im = cv2.imdecode(buf, cv2.IMREAD_COLOR)  # decoded as BGR
    elif image is not None:
        # Model expects BGR! Gradio delivers RGB, so flip the channel axis.
        im = image[:, :, ::-1]
    # FIX: previously a failed download (or no input at all) fell through
    # and raised NameError/TypeError; fail with a clear message instead.
    if im is None:
        raise ValueError("No image provided or the URL could not be fetched")

    model["cfg"].MODEL.ROI_HEADS.SCORE_THRESH_TEST = min_score
    predictor = DefaultPredictor(model["cfg"])
    outputs = predictor(im)

    # FIX: Visualizer documents RGB input; passing the BGR frame directly
    # produced channel-swapped colors in the rendered output.
    v = Visualizer(im[:, :, ::-1], model["metadata"], scale=1.2)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    return out.get_image()
 
 
 
 
 
62
 
63
# Static markdown shown above/below the Gradio UI.
title = "# Segmentation Model Demo"
# FIX: the instance-segmentation link used the 1x-schedule config, but the
# app actually loads mask_rcnn_R_50_FPN_3x.yaml; link the config in use.
description = """
This demo introduces an interactive playground for pretrained Detectron2 model.
Currently, two models are supported that were trained on COCO dataset:
* [Instance Segmentation](https://github.com/facebookresearch/detectron2/blob/main/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml): Identifies, outlines individual object instances.
* [Panoptic Segmentation](https://github.com/facebookresearch/detectron2/blob/main/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml): Unifies instance and semantic segmentation.
"""
footer = "Made by eyepop.ai with ❤️."
71
+
72
# Gradio UI: two input tabs (URL vs. upload), shared controls, one output.
with gr.Blocks() as demo:
    gr.Markdown(title)
    gr.Markdown(description)

    with gr.Tab("From URL"):
        url_input = gr.Textbox(
            label="Image URL",
            placeholder="https://images.unsplash.com/photo-1701226362119-cc86312846af?q=80&w=1587&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D",
        )

    with gr.Tab("From Image"):
        image_input = gr.Image(type="numpy", label="Input Image")

    min_score = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Minimum score")
    model_name = gr.Radio(
        choices=[model["name"] for model in models],
        value=models[0]["name"],
        label="Select Detectron2 model",
    )
    output_image = gr.Image(type="pil", label="Output")

    submit = gr.Button("Submit")
    submit.click(
        fn=inference,
        inputs=[url_input, image_input, min_score, model_name],
        outputs=output_image,
    )

    gr.Markdown(footer)

demo.launch()
 
 
 
95