Spaces:
Running
Running
Upload app.py
Browse files
app.py
CHANGED
|
@@ -271,7 +271,7 @@ def predict_depth(input_file, colormap_choice):
|
|
| 271 |
|
| 272 |
except Exception as e:
|
| 273 |
print(f"Error during inference: {str(e)}")
|
| 274 |
-
return None
|
| 275 |
|
| 276 |
|
| 277 |
def capture_and_predict(camera_image, colormap_choice):
|
|
@@ -426,16 +426,49 @@ with gr.Blocks(title="Depth Anything AC - Depth Estimation Demo", theme=gr.theme
|
|
| 426 |
if os.path.exists(f"toyset/{vid_file}"):
|
| 427 |
video_examples.append([f"toyset/{vid_file}", "Spectral"])
|
| 428 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 429 |
if image_examples:
|
| 430 |
gr.Examples(
|
| 431 |
examples=image_examples,
|
| 432 |
inputs=[upload_image, colormap_choice],
|
| 433 |
-
outputs=[output_image],
|
| 434 |
-
fn=
|
| 435 |
cache_examples=False,
|
| 436 |
label="Try these example images"
|
| 437 |
)
|
| 438 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 439 |
submit_btn.click(
|
| 440 |
fn=handle_prediction,
|
| 441 |
inputs=[input_source, upload_image, upload_file, camera_image, colormap_choice],
|
|
|
|
| 271 |
|
| 272 |
except Exception as e:
|
| 273 |
print(f"Error during inference: {str(e)}")
|
| 274 |
+
return None, gr.update(visible=False)
|
| 275 |
|
| 276 |
|
| 277 |
def capture_and_predict(camera_image, colormap_choice):
|
|
|
|
| 426 |
if os.path.exists(f"toyset/{vid_file}"):
|
| 427 |
video_examples.append([f"toyset/{vid_file}", "Spectral"])
|
| 428 |
|
| 429 |
+
# Handler for clicks on a video example: flips the UI into "Upload Video"
# mode and loads the selected clip into the file input. Prediction itself
# only runs when the user presses Submit.
def handle_video_example(video_path, colormap):
    # `colormap` is accepted so the signature matches the Examples
    # `inputs=[upload_file, colormap_choice]` wiring; it is not used here.
    source_mode = "Upload Video"
    image_update = gr.update(visible=False)   # hide the image input
    file_update = gr.update(visible=True, value=video_path)  # show + fill video input
    camera_update = gr.update(visible=False)  # hide the camera input
    return (source_mode, image_update, file_update, camera_update)
|
| 438 |
+
|
| 439 |
+
# Handler for clicks on an image example: flips the UI into "Upload Image"
# mode, shows the selected image, and runs depth prediction on it right away.
def handle_image_example(image, colormap):
    """Process an example image and return the UI updates.

    Returns a 5-tuple matching the Examples `outputs` wiring:
    (input_source, upload_image, upload_file, camera_image, output_image).
    """
    result = predict_depth(image, colormap)
    # predict_depth normally returns a tuple whose first element is the
    # rendered depth image (None on failure). Guard against a bare None
    # return as well — the pre-refactor error path returned None directly,
    # which would make `result[0]` raise TypeError.
    output_image = result[0] if result else None
    return (
        "Upload Image",                        # input_source radio value
        gr.update(visible=True, value=image),  # show the image input, pre-filled
        gr.update(visible=False),              # hide the video/file input
        gr.update(visible=False),              # hide the camera input
        output_image,                          # depth map, or None on error
    )
|
| 451 |
+
|
| 452 |
# Wire up the image examples gallery. Clicking an example calls
# handle_image_example, which auto-switches the UI to image mode and
# immediately renders the example's depth map into output_image.
if image_examples:
    gr.Examples(
        examples=image_examples,
        inputs=[upload_image, colormap_choice],
        # outputs must line up with handle_image_example's 5-tuple return.
        outputs=[input_source, upload_image, upload_file, camera_image, output_image],
        fn=handle_image_example,
        cache_examples=False,  # run on click rather than pre-computing results
        label="Try these example images"
    )
|
| 461 |
|
| 462 |
+
# Wire up the video examples gallery. Clicking an example calls
# handle_video_example, which auto-switches the UI to video mode and
# loads the clip into upload_file; prediction waits for Submit.
if video_examples:
    gr.Examples(
        examples=video_examples,
        inputs=[upload_file, colormap_choice],
        # outputs must line up with handle_video_example's 4-tuple return.
        outputs=[input_source, upload_image, upload_file, camera_image],
        fn=handle_video_example,
        cache_examples=False,  # run on click rather than pre-computing results
        label="Try these example videos"
    )
|
| 471 |
+
|
| 472 |
submit_btn.click(
|
| 473 |
fn=handle_prediction,
|
| 474 |
inputs=[input_source, upload_image, upload_file, camera_image, colormap_choice],
|