update
- MANIFEST.in +0 -1
- app.py +112 -1
- demo.py +4 -9
- pyproject.toml +0 -6
- setup.cfg +0 -11
- setup.py +0 -58
MANIFEST.in DELETED
@@ -1 +0,0 @@
-include requirements.txt
app.py CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-from demo import automask_image_app, automask_video_app
+from demo import automask_image_app, automask_video_app, manual_app, sahi_autoseg_app
 
 
 def image_app():
@@ -115,6 +115,114 @@ def video_app():
         )
 
 
+def sahi_app():
+    with gr.Blocks():
+        with gr.Row():
+            with gr.Column():
+                sahi_image_file = gr.Image(type="filepath").style(height=260)
+
+                with gr.Row():
+                    with gr.Column():
+                        sahi_autoseg_model_type = gr.Dropdown(
+                            choices=[
+                                "vit_h",
+                                "vit_l",
+                                "vit_b",
+                            ],
+                            value="vit_l",
+                            label="Sam Model Type",
+                        )
+                        sahi_model_type = gr.Dropdown(
+                            choices=[
+                                "yolov5",
+                                "yolov8",
+                            ],
+                            value="yolov5",
+                            label="Detector Model Type",
+                        )
+
+                        sahi_model_path = gr.Dropdown(
+                            choices=[
+                                "yolov5m",
+                                "yolov5l",
+                                "yolov5m6",
+                                "yolov5l6",
+                            ],
+                            value="yolov5m",
+                            label="Detector Model Path",
+                        )
+                        sahi_conf_th = gr.Slider(
+                            minimum=0,
+                            maximum=1,
+                            step=0.1,
+                            value=0.2,
+                            label="Confidence Threshold",
+                        )
+
+                        sahi_image_size = gr.Slider(
+                            minimum=0,
+                            maximum=1600,
+                            step=32,
+                            value=640,
+                            label="Image Size",
+                        )
+
+                        sahi_slice_height = gr.Slider(
+                            minimum=0,
+                            maximum=640,
+                            step=32,
+                            value=256,
+                            label="Slice Height",
+                        )
+
+                        sahi_slice_width = gr.Slider(
+                            minimum=0,
+                            maximum=640,
+                            step=32,
+                            value=256,
+                            label="Slice Width",
+                        )
+
+                        sahi_overlap_height = gr.Slider(
+                            minimum=0,
+                            maximum=1,
+                            step=0.1,
+                            value=0.2,
+                            label="Overlap Height",
+                        )
+
+                        sahi_overlap_width = gr.Slider(
+                            minimum=0,
+                            maximum=1,
+                            step=0.1,
+                            value=0.2,
+                            label="Overlap Width",
+                        )
+
+
+                sahi_image_predict = gr.Button(value="Generator")
+
+            with gr.Column():
+                output_image = gr.Image()
+
+        sahi_image_predict.click(
+            fn=sahi_autoseg_app,
+            inputs=[
+                sahi_image_file,
+                sahi_autoseg_model_type,
+                sahi_model_type,
+                sahi_model_path,
+                sahi_conf_th,
+                sahi_image_size,
+                sahi_slice_height,
+                sahi_slice_width,
+                sahi_overlap_height,
+                sahi_overlap_width,
+
+            ],
+            outputs=[output_image],
+        )
+
+
 def metaseg_app():
     app = gr.Blocks()
     with app:
@@ -134,6 +242,9 @@ def metaseg_app():
             image_app()
         with gr.Tab("Video"):
             video_app()
+        with gr.Tab("SAHI"):
+            sahi_app()
+
 
     app.queue(concurrency_count=1)
     app.launch(debug=True, enable_queue=True)
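Note on the new SAHI tab: Gradio passes the components listed in inputs to fn positionally, so the order of the inputs list above must match the parameter order of sahi_autoseg_app in demo.py. A minimal sketch of the equivalent direct call with the tab's default values; the image path is a hypothetical example, not from this commit.

from demo import sahi_autoseg_app

# Roughly what the "Generator" button's click triggers, argument order
# following the inputs list above.
sahi_autoseg_app(
    "example.jpg",  # sahi_image_file (hypothetical path)
    "vit_l",        # sahi_autoseg_model_type (SAM weights)
    "yolov5",       # sahi_model_type (detector family)
    "yolov5m",      # sahi_model_path (detector weights)
    0.2,            # sahi_conf_th
    640,            # sahi_image_size
    256,            # sahi_slice_height
    256,            # sahi_slice_width
    0.2,            # sahi_overlap_height
    0.2,            # sahi_overlap_width
)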
demo.py CHANGED
@@ -2,7 +2,6 @@ from metaseg import SegAutoMaskPredictor, SegManualMaskPredictor, SahiAutoSegmentation
 
 # For image
 
-
 def automask_image_app(image_path, model_type, points_per_side, points_per_batch, min_area):
     SegAutoMaskPredictor().image_predict(
         source=image_path,
@@ -19,7 +18,6 @@ def automask_image_app(image_path, model_type, points_per_side, points_per_batch, min_area):
 
 # For video
 
-
 def automask_video_app(video_path, model_type, points_per_side, points_per_batch, min_area):
     SegAutoMaskPredictor().video_predict(
         source=video_path,
@@ -36,7 +34,6 @@ def automask_video_app(video_path, model_type, points_per_side, points_per_batch, min_area):
 
 # For manuel box and point selection
 
-
 def manual_app(image_path, model_type, input_point, input_label, input_box, multimask_output, random_color):
     SegManualMaskPredictor().image_predict(
         source=image_path,
@@ -55,11 +52,9 @@ def manual_app(image_path, model_type, input_point, input_label, input_box, multimask_output, random_color):
 
 # For sahi sliced prediction
 
-
-
-
-def sahi_app(
+def sahi_autoseg_app(
     image_path,
+    sam_model_type,
     detection_model_type,
     detection_model_path,
     conf_th,
@@ -81,9 +76,9 @@ def sahi_app(
         overlap_width_ratio=overlap_width_ratio,
     )
 
-
+    SahiAutoSegmentation().predict(
         source=image_path,
-        model_type=
+        model_type=sam_model_type,
         input_box=boxes,
         multimask_output=False,
         random_color=False,
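Note on the rename: app.py now imports sahi_autoseg_app, and the new sam_model_type parameter is forwarded to SahiAutoSegmentation().predict as model_type. A minimal sketch of that final call in isolation; boxes and the input image are hypothetical placeholders, since the sliced-detection step that produces boxes lies outside the hunks shown.

from metaseg import SahiAutoSegmentation

# Hypothetical xyxy box standing in for the detector output used in demo.py.
boxes = [[100, 100, 400, 400]]

SahiAutoSegmentation().predict(
    source="example.jpg",    # hypothetical input image
    model_type="vit_l",      # value carried in via the new sam_model_type argument
    input_box=boxes,
    multimask_output=False,
    random_color=False,
)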
pyproject.toml DELETED
@@ -1,6 +0,0 @@
-[tool.black]
-line-length = 120
-
-[tool.isort]
-line_length = 120
-profile = "black"
setup.cfg DELETED
@@ -1,11 +0,0 @@
-[isort]
-line_length=100
-multi_line_output=3
-include_trailing_comma=True
-known_standard_library=numpy,setuptools
-skip_glob=*/__init__.py
-known_myself=segment_anything
-known_third_party=matplotlib,cv2,torch,torchvision,pycocotools,onnx,black,isort
-no_lines_before=STDLIB,THIRDPARTY
-sections=FUTURE,STDLIB,THIRDPARTY,MYSELF,FIRSTPARTY,LOCALFOLDER
-default_section=FIRSTPARTY
|
setup.py
DELETED
|
@@ -1,58 +0,0 @@
|
|
| 1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
-
# All rights reserved.
|
| 3 |
-
|
| 4 |
-
# This source code is licensed under the license found in the
|
| 5 |
-
# LICENSE file in the root directory of this source tree.
|
| 6 |
-
|
| 7 |
-
import io
|
| 8 |
-
import os
|
| 9 |
-
import re
|
| 10 |
-
|
| 11 |
-
from setuptools import find_packages, setup
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
def get_long_description():
|
| 15 |
-
base_dir = os.path.abspath(os.path.dirname(__file__))
|
| 16 |
-
with io.open(os.path.join(base_dir, "README.md"), encoding="utf-8") as f:
|
| 17 |
-
return f.read()
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
def get_requirements():
|
| 21 |
-
with open("requirements.txt") as f:
|
| 22 |
-
return f.read().splitlines()
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
def get_version():
|
| 26 |
-
current_dir = os.path.abspath(os.path.dirname(__file__))
|
| 27 |
-
version_file = os.path.join(current_dir, "metaseg", "__init__.py")
|
| 28 |
-
with io.open(version_file, encoding="utf-8") as f:
|
| 29 |
-
return re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', f.read(), re.M).group(1)
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
_ALL_REQUIREMENTS = ["matplotlib", "pycocotools", "opencv-python", "onnx", "onnxruntime"]
|
| 33 |
-
|
| 34 |
-
_DEV_REQUIREMENTS = [
|
| 35 |
-
"black==23.*",
|
| 36 |
-
"isort==5.12.0",
|
| 37 |
-
"flake8",
|
| 38 |
-
"mypy",
|
| 39 |
-
]
|
| 40 |
-
|
| 41 |
-
extras = {
|
| 42 |
-
"all": _ALL_REQUIREMENTS,
|
| 43 |
-
"dev": _DEV_REQUIREMENTS,
|
| 44 |
-
}
|
| 45 |
-
|
| 46 |
-
setup(
|
| 47 |
-
name="metaseg",
|
| 48 |
-
license="Apache-2.0",
|
| 49 |
-
author="kadirnar",
|
| 50 |
-
long_description=get_long_description(),
|
| 51 |
-
long_description_content_type="text/markdown",
|
| 52 |
-
url="https://github.com/kadirnar/segment-anything-pip",
|
| 53 |
-
version=get_version(),
|
| 54 |
-
install_requires=get_requirements(),
|
| 55 |
-
packages=find_packages(exclude=("notebook")),
|
| 56 |
-
extras_require=extras,
|
| 57 |
-
python_requires=">=3.8",
|
| 58 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|