Commit e1d5689 · Parent: a7087a4

add .gitignore and enhance image comparison in app.py and app_local.py

Files changed:
- .gitignore   +3 -0
- app.py       +22 -6
- app_local.py +22 -6
.gitignore ADDED

@@ -0,0 +1,3 @@
+*.pyc
+*.pkl
+*__pycache__*
app.py CHANGED

@@ -301,20 +301,36 @@ def single_inference(image, model: str, progress=gr.Progress()):
         return None, "❌ Please upload an image."
 
     try:
+        # Store original image for slider comparison
+        original_image = None
+
         # Convert image to numpy array if needed
         if isinstance(image, str):
             # If it's a file path
+            original_image = cv2.imread(image)
+            original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)  # Convert to RGB for display
             image = cv2.imread(image)
         elif hasattr(image, 'save'):
             # If it's a PIL Image
+            original_image = np.array(image)  # PIL images are already in RGB
+            image = np.array(image)
+            if len(image.shape) == 3 and image.shape[2] == 3:
+                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+        else:
+            # If it's already a numpy array (from Gradio)
+            original_image = np.array(image)  # Keep original in RGB
             image = np.array(image)
             if len(image.shape) == 3 and image.shape[2] == 3:
                 image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
 
         progress(0.1, desc=f"Running {model}")
-
+        depth_result, label = run_model(model, image)
+
+        # Convert depth result back to RGB for slider (depth_result is already in RGB from colorize_depth)
+        depth_result_rgb = depth_result  # colorize_depth already returns RGB
+
         progress(1.0, desc="Done")
-        return
+        return (original_image, depth_result_rgb), f"**Original** vs **{label}**"
 
     finally:
         # Clean up GPU memory after inference

@@ -401,21 +417,21 @@ def create_app():
             return compare_models(image, default1, default2)
         examples = gr.Examples(examples=ex_imgs, inputs=[img_input], outputs=[out_img, out_status], fn=compare_example_fn)
 
-        with gr.Tab("
+        with gr.Tab("📷 Single Model"):
            with gr.Row():
                 img_input3 = gr.Image(label="Input Image")
                 with gr.Column():
                     m_single = gr.Dropdown(choices=model_choices, label="Model", value=default1)
                     btn3 = gr.Button("Run", variant="primary")
-
+            single_slider = gr.ImageSlider(label="Original vs Depth")
             out_single_status = gr.Markdown()
-            btn3.click(single_inference, inputs=[img_input3, m_single], outputs=[
+            btn3.click(single_inference, inputs=[img_input3, m_single], outputs=[single_slider, out_single_status], show_progress=True)
 
             # Examples for single model
             if ex_imgs:
                 def single_example_fn(image):
                     return single_inference(image, default1)
-                examples3 = gr.Examples(examples=ex_imgs, inputs=[img_input3], outputs=[
+                examples3 = gr.Examples(examples=ex_imgs, inputs=[img_input3], outputs=[single_slider, out_single_status], fn=single_example_fn)
 
     gr.Markdown("""
     ---
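Note on the wiring above: gr.ImageSlider renders a (left, right) pair of images as a draggable before/after comparison, which is why single_inference now returns a two-image tuple plus a Markdown status string instead of a single image. A minimal, self-contained sketch of the same pattern, assuming a Gradio release that ships gr.ImageSlider; fake_depth is a hypothetical stand-in for this app's run_model:

    import gradio as gr
    import numpy as np

    def fake_depth(image):
        # Hypothetical stand-in for run_model: grayscale copy, restacked to 3 channels.
        gray = image.mean(axis=2).astype(np.uint8)
        return np.stack([gray] * 3, axis=-1), "fake model"

    def infer(image):
        if image is None:
            return None, "❌ Please upload an image."
        depth, label = fake_depth(image)
        # ImageSlider takes a (left, right) tuple; both sides stay in RGB.
        return (image, depth), f"**Original** vs **{label}**"

    with gr.Blocks() as demo:
        inp = gr.Image(label="Input Image")
        btn = gr.Button("Run", variant="primary")
        slider = gr.ImageSlider(label="Original vs Depth")
        status = gr.Markdown()
        btn.click(infer, inputs=[inp], outputs=[slider, status], show_progress=True)

    demo.launch()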
app_local.py CHANGED

@@ -247,20 +247,36 @@ def single_inference(image, model: str, progress=gr.Progress()):
     if image is None:
         return None, "❌ Please upload an image."
 
+    # Store original image for slider comparison
+    original_image = None
+
     # Convert image to numpy array if needed
     if isinstance(image, str):
         # If it's a file path
+        original_image = cv2.imread(image)
+        original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)  # Convert to RGB for display
         image = cv2.imread(image)
     elif hasattr(image, 'save'):
         # If it's a PIL Image
+        original_image = np.array(image)  # PIL images are already in RGB
+        image = np.array(image)
+        if len(image.shape) == 3 and image.shape[2] == 3:
+            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+    else:
+        # If it's already a numpy array (from Gradio)
+        original_image = np.array(image)  # Keep original in RGB
         image = np.array(image)
         if len(image.shape) == 3 and image.shape[2] == 3:
             image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
 
     progress(0.1, desc=f"Running {model}")
-
+    depth_result, label = run_model(model, image)
+
+    # Convert depth result back to RGB for slider (depth_result is already in RGB from colorize_depth)
+    depth_result_rgb = depth_result  # colorize_depth already returns RGB
+
     progress(1.0, desc="Done")
-    return
+    return (original_image, depth_result_rgb), f"**Original** vs **{label}**"
 
 def get_example_images() -> List[str]:
     import re

@@ -338,20 +354,20 @@ def create_app():
         def compare_example_fn(image):
             return compare_models(image, default1, default2)
         examples = gr.Examples(examples=ex_imgs, inputs=[img_input], outputs=[out_img, out_status], fn=compare_example_fn)
-        with gr.Tab(" Single Model"):
+        with gr.Tab("📷 Single Model"):
             with gr.Row():
                 img_input3 = gr.Image(label="Input Image")
                 m_single = gr.Dropdown(choices=model_choices, label="Model", value=default1)
                 btn3 = gr.Button("Run", variant="primary")
-
+            single_slider = gr.ImageSlider(label="Original vs Depth")
             out_single_status = gr.Markdown()
-            btn3.click(single_inference, inputs=[img_input3, m_single], outputs=[
+            btn3.click(single_inference, inputs=[img_input3, m_single], outputs=[single_slider, out_single_status], show_progress=True)
 
             # Simple Examples - Tab 3
             if ex_imgs:
                 def single_example_fn(image):
                     return single_inference(image, default1)
-                examples3 = gr.Examples(examples=ex_imgs, inputs=[img_input3], outputs=[
+                examples3 = gr.Examples(examples=ex_imgs, inputs=[img_input3], outputs=[single_slider, out_single_status], fn=single_example_fn)
     gr.Markdown("""
     ---
     - **v1**: [Depth Anything v1](https://github.com/LiheYoung/Depth-Anything)
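A side note on the color handling both files now share: Gradio and PIL hand the function RGB arrays, while cv2.imread and the OpenCV-based model pipeline work in BGR, so the change keeps a display copy in RGB and converts the working copy to BGR. A compact sketch of that split; to_display_and_working is a hypothetical helper, not part of the app:

    import cv2
    import numpy as np

    def to_display_and_working(image):
        """Return (display_rgb, working_bgr) for a file path, PIL image, or RGB array."""
        if isinstance(image, str):
            working = cv2.imread(image)                         # BGR straight from disk
            display = cv2.cvtColor(working, cv2.COLOR_BGR2RGB)  # RGB copy for the slider
        else:
            display = np.array(image)                           # PIL/Gradio arrays are RGB
            working = display
            if display.ndim == 3 and display.shape[2] == 3:
                working = cv2.cvtColor(display, cv2.COLOR_RGB2BGR)
        return display, working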