Spaces:
Running on Zero
Commit · ae90ebf
Parent(s): ba23146
reorder gr blocks
app.py CHANGED
@@ -1,4 +1,4 @@
-from typing import
+from typing import Tuple
 
 import gradio as gr
 import spaces
@@ -26,16 +26,15 @@ It can be used as an extention to Segment-Anything Model (SAM) or used as a stan
 """
 EXAMPLES = [
     [
-        "tiny",
-        0.5,
         "https://raw.githubusercontent.com/AnasIbrahim/image_agnostic_segmentation/master/demo/rgb_images/000000.png",
         "https://raw.githubusercontent.com/AnasIbrahim/image_agnostic_segmentation/CASE_release/demo/objects_gallery/obj_000001/1.jpg",
         "https://raw.githubusercontent.com/AnasIbrahim/image_agnostic_segmentation/CASE_release/demo/objects_gallery/obj_000001/2.jpg",
         "https://raw.githubusercontent.com/AnasIbrahim/image_agnostic_segmentation/CASE_release/demo/objects_gallery/obj_000001/3.jpg",
         "https://raw.githubusercontent.com/AnasIbrahim/image_agnostic_segmentation/CASE_release/demo/objects_gallery/obj_000001/4.jpg",
         "https://raw.githubusercontent.com/AnasIbrahim/image_agnostic_segmentation/CASE_release/demo/objects_gallery/obj_000001/5.jpg",
-        "https://raw.githubusercontent.com/AnasIbrahim/image_agnostic_segmentation/CASE_release/demo/objects_gallery/obj_000001/6.jpg"
-    ]
+        "https://raw.githubusercontent.com/AnasIbrahim/image_agnostic_segmentation/CASE_release/demo/objects_gallery/obj_000001/6.jpg",
+        "tiny",
+    ]
 ]
 
 DEVICE = torch.device('cuda')
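Note on the EXAMPLES change: gr.Examples feeds each example row to the components in `inputs` positionally, so a row's order must mirror the inputs list. The commit therefore drops the 0.5 threshold (the slider is removed below) and moves "tiny" to the end, matching the reordered `inputs` lists further down. A minimal, self-contained sketch of that positional mapping (the components and URL here are hypothetical stand-ins, not the app's):

import gradio as gr

def echo(image_url, name):
    # Receives row[0] as image_url and row[1] as name.
    return f"{name}: {image_url}"

with gr.Blocks() as demo:
    url = gr.Textbox(label="Image URL")
    name = gr.Textbox(label="Checkpoint name")
    out = gr.Textbox(label="Result")
    gr.Examples(
        examples=[["https://example.com/a.png", "tiny"]],  # one row, ordered like `inputs`
        inputs=[url, name],   # row[0] -> url, row[1] -> name
        outputs=[out],
        fn=echo,
        run_on_click=True,
    )

demo.launch()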
@@ -54,11 +53,10 @@ DOUNSEEN_MODEL = load_dounseen_model(device=DEVICE)
 @torch.inference_mode()
 @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
 def process(
-    checkpoint_dropdown,
-    threshold,
     image_input,
-    object_image1=None, object_image2=None, object_image3=None, object_image4=None, object_image5=None, object_image6=None
-):
+    object_image1=None, object_image2=None, object_image3=None, object_image4=None, object_image5=None, object_image6=None,
+    checkpoint_dropdown=CHECKPOINT_NAMES[0],
+) -> Tuple[Image.Image, Image.Image]:
     model = MASK_GENERATORS[checkpoint_dropdown]
     image = np.array(image_input.convert("RGB"))
     sam2_result = model.generate(image)
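Why `checkpoint_dropdown` gains a default when it moves: Python requires that parameters with defaults follow those without, so once it trails the six optional object images it must carry one; `CHECKPOINT_NAMES[0]` keeps the old dropdown default. A minimal illustration of the rule:

def ok(image_input, object_image1=None, checkpoint="tiny"):  # valid: defaults trail
    ...

# def bad(image_input, object_image1=None, checkpoint):  # SyntaxError:
#     ...    # parameter without a default follows one with a default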
@@ -85,19 +83,25 @@ def process(
 
 with gr.Blocks() as demo:
     gr.Markdown(MARKDOWN)
-    with gr.Row():
-        checkpoint_dropdown_component = gr.Dropdown(
-            choices=CHECKPOINT_NAMES,
-            value=CHECKPOINT_NAMES[0],
-            label="Checkpoint", info="Select a SAM2 checkpoint to use.",
-            interactive=True
-        )
-        threshold = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Threshold", value=0.5)
     with gr.Row():
         with gr.Column():
             image_input_component = gr.Image(type='pil', label='Upload image')
+            with gr.Row():
+                object_image_1 = gr.Image(type='pil', label=f'Object Image 1')
+                object_image_2 = gr.Image(type='pil', label=f'Object Image 2')
+                object_image_3 = gr.Image(type='pil', label=f'Object Image 3')
+            with gr.Row():
+                object_image_4 = gr.Image(type='pil', label=f'Object Image 4')
+                object_image_5 = gr.Image(type='pil', label=f'Object Image 5')
+                object_image_6 = gr.Image(type='pil', label=f'Object Image 6')
+            object_images = [object_image_1, object_image_2, object_image_3, object_image_4, object_image_5, object_image_6]
+            checkpoint_dropdown_component = gr.Dropdown(
+                choices=CHECKPOINT_NAMES,
+                value=CHECKPOINT_NAMES[0],
+                label="Checkpoint", info="Select a SAM2 checkpoint to use.",
+                interactive=True
+            )
             submit_button_component = gr.Button(value='Submit', variant='primary')
-            object_images = [gr.Image(type="pil", label=f"Object Image {i + 1}", width=256, height=256) for i in range(6)]  # Set a smaller display size for the object images
         with gr.Column():
             image_output_sam = gr.Image(type='pil', label='SAM2 Output')
             image_output_dounseen = gr.Image(type='pil', label='DoUnseen Output')
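The layout change replaces the `for i in range(6)` comprehension (six images stacked vertically inside the column) with two explicit `gr.Row()` groups of three, and moves the checkpoint dropdown into the same column. A minimal sketch of the nesting semantics this relies on (labels and structure here are illustrative, not the app's):

import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():                      # children placed side by side
        with gr.Column():               # left column: children stacked vertically
            gr.Image(type='pil', label='Upload image')
            with gr.Row():              # a horizontal strip of three thumbnails
                for i in range(3):
                    gr.Image(type='pil', label=f'Object Image {i + 1}')
        with gr.Column():               # right column: outputs stack here
            gr.Image(type='pil', label='Output')

demo.launch()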
@@ -105,11 +109,7 @@ with gr.Blocks() as demo:
     gr.Examples(
         fn=process,
         examples=EXAMPLES,
-        inputs=[
-            checkpoint_dropdown_component,
-            threshold,
-            image_input_component,
-        ] + object_images,
+        inputs=[image_input_component] + object_images + [checkpoint_dropdown_component],
         outputs=[image_output_sam, image_output_dounseen],
         cache_examples=False,
         run_on_click=True
@@ -117,11 +117,7 @@ with gr.Blocks() as demo:
 
     submit_button_component.click(
         fn=process,
-        inputs=[
-            checkpoint_dropdown_component,
-            threshold,
-            image_input_component,
-        ] + object_images,
+        inputs=[image_input_component] + object_images + [checkpoint_dropdown_component],
         outputs=[image_output_sam, image_output_dounseen]
     )
 
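After this commit the same `inputs` expression appears in both `gr.Examples` and the `click` handler. A possible follow-up, not part of this change, would be to build that list once so the two call sites cannot drift apart (a sketch with hypothetical stand-in components):

import gradio as gr

def process(image, checkpoint="tiny"):  # stand-in for the app's process()
    return image

with gr.Blocks() as demo:
    image_in = gr.Image(type='pil', label='Upload image')
    checkpoint_in = gr.Textbox(value='tiny', label='Checkpoint')
    image_out = gr.Image(type='pil', label='Output')
    submit = gr.Button('Submit')

    shared_inputs = [image_in, checkpoint_in]   # defined once ...
    gr.Examples(
        examples=[["https://example.com/a.png", "tiny"]],
        fn=process,
        inputs=shared_inputs,                   # ... used here
        outputs=[image_out],
        run_on_click=True,
    )
    submit.click(fn=process, inputs=shared_inputs, outputs=[image_out])  # ... and here

demo.launch()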