kadzon committed on
Commit
8960ee7
·
1 Parent(s): 72d33d7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -6
app.py CHANGED
@@ -1,14 +1,14 @@
1
- # MegaDetector v5 Demo
2
  import gradio as gr
3
- #import torch
4
- #import torchvision
5
  import numpy as np
6
  from PIL import Image
7
 
8
  # Load MegaDetector v5a model
9
- # TODO: Allow user selectable model?
10
  # models = ["model_weights/md_v5a.0.0.pt","model_weights/md_v5b.0.0.pt"]
11
- #model = torch.hub.load('ultralytics/yolov5', 'custom', "model_weights/md_v5a.0.0.pt")
12
 
13
  def yolo(im, size=640):
14
  g = (size / max(im.size)) # gain
@@ -30,7 +30,12 @@ outputs = gr.outputs.Image(type="pil", label="Output Image")
30
 
31
  title = "MegaDetector and DeepLabcutLive"
32
  description = "Interact with MegaDetector and DeeplabCutLive for pose estimation"
33
-
34
 
35
  examples = [['data/Macropod.jpg'], ['data/koala2.jpg'],['data/cat.jpg'],['data/BrushtailPossum.jpg']]
 
 
 
 
 
36
  gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, examples=examples, theme="huggingface").launch(enable_queue=True)
 
1
+ # MegaDetector v5 and DLC Demo
2
  import gradio as gr
3
+ import torch
4
+ import torchvision
5
  import numpy as np
6
  from PIL import Image
7
 
8
  # Load MegaDetector v5a model
9
+
10
  # models = ["model_weights/md_v5a.0.0.pt","model_weights/md_v5b.0.0.pt"]
11
+ model = torch.hub.load('ultralytics/yolov5', 'custom', "model_weights/md_v5a.0.0.pt")
12
 
13
  def yolo(im, size=640):
14
  g = (size / max(im.size)) # gain
 
30
 
31
  title = "MegaDetector and DeepLabcutLive"
32
  description = "Interact with MegaDetector and DeeplabCutLive for pose estimation"
33
+ article = "<p style='text-align: center'>This app makes predictions using a YOLOv5x6 model that was trained to detect animals, humans, and vehicles in camera trap images; find out more about the project on <a href='https://github.com/microsoft/CameraTraps'>GitHub</a>. This app was built by Henry Lydecker but really depends on code and models developed by <a href='http://ecologize.org/'>Ecologize</a> and <a href='http://aka.ms/aiforearth'>Microsoft AI for Earth</a>. Find out more about the YOLO model from the original creator, <a href='https://pjreddie.com/darknet/yolo/'>Joseph Redmon</a>. YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset and developed by Ultralytics, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. <a href='https://github.com/ultralytics/yolov5'>Source code</a> | <a href='https://pytorch.org/hub/ultralytics_yolov5'>PyTorch Hub</a></p>"
34
 
35
  examples = [['data/Macropod.jpg'], ['data/koala2.jpg'],['data/cat.jpg'],['data/BrushtailPossum.jpg']]
36
+ gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, examples=examples, theme="huggingface").launch(enable_queue=True)
37
+
38
+
39
+
40
+ examples = [['data/owl.jpg'], ['data/snake.jpg'],['data/beluga.jpg'],['data/rhino.jpg']]
41
  gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, examples=examples, theme="huggingface").launch(enable_queue=True)