foryahasake committed on
Commit
731fbc6
·
verified ·
1 Parent(s): 8555079

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -36
app.py CHANGED
@@ -5,6 +5,7 @@ except:
5
  os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
6
 
7
  import cv2
 
8
  import torch
9
  from matplotlib.pyplot import axis
10
  import gradio as gr
@@ -41,6 +42,16 @@ my_metadata.thing_classes = ["None", "BAD_BILLBOARD","BROKEN_SIGNAGE","CLUTTER_S
41
  if not torch.cuda.is_available():
42
  cfg.MODEL.DEVICE = "cpu"
43
 
 
 
 
 
 
 
 
 
 
 
44
 
45
  def inference(image_url, image, min_score):
46
  if image_url:
@@ -64,6 +75,11 @@ def inference(image_url, image, min_score):
64
  return out.get_image()
65
 
66
 
 
 
 
 
 
67
  title = "Smartathon Phase2 Demo - Baseer"
68
  description = "This demo introduces an interactive playground for our trained Detectron2 model."
69
  article = '<p>Detectron model is available from our repository <a href="https://github.com/asalhi/Smartathon-Baseer">here</a>.</p>'
@@ -85,52 +101,39 @@ article = '<p>Detectron model is available from our repository <a href="https://
85
  # #examples=['./d1.jpeg', './d2.jpeg', './d3.jpeg','./d4.jpeg','./d5.jpeg','./d6.jpeg']
86
 
87
 
88
- with gr.Blocks(title=title,
89
- css=".gradio-container {background:white;}"
90
- ) as demo:
91
 
92
- gr.HTML("""<h4 style="font-weight:bold; text-align:center; color:navy;">"Smartathon Phase2 Demo - Baseer"</h4>""")
93
- # #
94
- #gr.HTML("""<h5 style="color:navy;">1- Select an example by clicking a thumbnail below.</h5>""")
95
- gr.HTML("""<h5 style="color:navy;">1- Select an example by clicking a thumbnail below.<br>
96
- 2- Or upload an image by clicking on the canvas.<br>
97
- 3- Or insert direct url of an image.</h5>""")
98
 
 
 
 
 
 
 
 
99
  with gr.Row():
100
  with gr.Column():
101
  #gr.HTML("""<h5 style="color:navy;">3- Or insert direct url of an image.</h5>""")
102
  input_url = gr.Textbox(label="Image URL", placeholder="")
103
  #gr.HTML("""<h5 style="color:navy;">2- Or upload an image by clicking on the canvas.<br></h5>""")
104
- input_image = gr.Image(type="filepath", image_mode="RGB", source="upload", optional=False, label="Input Image")
 
105
  gr.HTML("""<h5 style="color:navy;">4- You can use this slider to control boxes min score: </h5>""")
106
  sliderr = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, label="Minimum score")
107
  output_image = gr.Image(type="pil", label="Output")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
 
109
- # gr.Interface(
110
- # inference,
111
- # [gr.inputs.Textbox(label="Image URL", placeholder=""),
112
- # gr.inputs.Image(type="filepath", image_mode="RGB", source="upload", optional=False, label="Input Image"),
113
- # gr.Slider(minimum=0.0, maximum=1.0, value=0.4, label="Minimum score"),
114
- # ],
115
-
116
-
117
- gr.Examples(['./d1.jpeg', './d2.jpeg', './d3.jpeg','./d4.jpeg','./d5.jpeg','./d6.jpeg'], inputs=input_image)
118
-
119
- #gr.HTML("""<br/>""")
120
-
121
-
122
-
123
-
124
-
125
- gr.HTML("""<h5 style="color:navy;">5- Then, click "Submit" button to predict object instances. It will take about 15-20 seconds (on cpu)</h5>""")
126
- send_btn = gr.Button("Submit")
127
- send_btn.click(fn=inference, inputs=[input_url,input_image,sliderr], outputs=[output_image], api_name="find")
128
-
129
- #gr.HTML("""<h5 style="color:navy;">Reference</h5>""")
130
- #gr.HTML("""<ul>""")
131
- gr.HTML("""<h5 style="color:navy;">Detectron model is available from our repository <a href="https://github.com/asalhi/Smartathon-Baseer">here</a>.</h5>""")
132
- #gr.HTML("""</ul>""")
133
 
134
 
135
- #demo.queue()
136
- demo.launch() # debug=True)
 
5
  os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
6
 
7
  import cv2
8
+ import supervision as sv
9
  import torch
10
  from matplotlib.pyplot import axis
11
  import gradio as gr
 
42
  if not torch.cuda.is_available():
43
  cfg.MODEL.DEVICE = "cpu"
44
 
45
+
46
+ def predict_frame(frame,_):
47
+ cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.3
48
+ predictor = DefaultPredictor(cfg)
49
+ outputs = predictor(frame)
50
+ v = Visualizer(frame[:,:,::-1], my_metadata, scale=1.2, instance_mode=ColorMode.IMAGE )
51
+ out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
52
+ return out.get_image()
53
+
54
+
55
 
56
  def inference(image_url, image, min_score):
57
  if image_url:
 
75
  return out.get_image()
76
 
77
 
78
+ def infer_video(video_path):
79
+ sv.process_video(source_path=video_path, target_path=f"result.mp4", callback=predict_frame)
80
+ return f"result.mp4"
81
+
82
+
83
  title = "Smartathon Phase2 Demo - Baseer"
84
  description = "This demo introduces an interactive playground for our trained Detectron2 model."
85
  article = '<p>Detectron model is available from our repository <a href="https://github.com/asalhi/Smartathon-Baseer">here</a>.</p>'
 
101
  # #examples=['./d1.jpeg', './d2.jpeg', './d3.jpeg','./d4.jpeg','./d5.jpeg','./d6.jpeg']
102
 
103
 
 
 
 
104
 
 
 
 
 
 
 
105
 
106
+ gr.Examples(['./d1.jpeg', './d2.jpeg', './d3.jpeg','./d4.jpeg','./d5.jpeg','./d6.jpeg'], inputs=input_image)
107
+
108
+
109
+
110
+
111
+
112
+
113
  with gr.Row():
114
  with gr.Column():
115
  #gr.HTML("""<h5 style="color:navy;">3- Or insert direct url of an image.</h5>""")
116
  input_url = gr.Textbox(label="Image URL", placeholder="")
117
  #gr.HTML("""<h5 style="color:navy;">2- Or upload an image by clicking on the canvas.<br></h5>""")
118
+ input_image = gr.Image(type="filepath", image_mode="RGB", sources="upload", label="Input Image")
119
+ input_video = gr.Video(format="mp4",sources="upload", label="Input video" )
120
  gr.HTML("""<h5 style="color:navy;">4- You can use this slider to control boxes min score: </h5>""")
121
  sliderr = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, label="Minimum score")
122
  output_image = gr.Image(type="pil", label="Output")
123
+ output_video = gr.Video(format="mp4", label="Output")
124
+
125
+
126
+ img_interface = gr.Interface(
127
+ fn=inference,
128
+ inputs=[input_url,input_image,sliderr], outputs=[output_image], api_name="find"
129
+ )
130
+ video_interface = gr.Interface(
131
+ fn=infer_video,
132
+ inputs=[input_video], outputs=[output_video], api_name="vid"
133
+ )
134
+ demo = gr.TabbedInterface([img_interface, video_interface], ["Image Upload", "Video Upload"])
135
+
136
+ demo.launch()
137
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
138
 
139