Spaces:
Runtime error
Runtime error
Upgraded Gradio; image display issue to be fixed.
Browse files
README.md
CHANGED
|
@@ -4,7 +4,7 @@ emoji: πΈππ
|
|
| 4 |
colorFrom: red
|
| 5 |
colorTo: yellow
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version:
|
| 8 |
app_file: app.py
|
| 9 |
pinned: true
|
| 10 |
license: mit
|
|
@@ -26,12 +26,11 @@ Please refer to our [GitHub repo](https://github.com/One-2-3-45/One-2-3-45) for
|
|
| 26 |
## Citation
|
| 27 |
|
| 28 |
```bibtex
|
| 29 |
-
@
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
primaryClass={cs.CV}
|
| 36 |
}
|
| 37 |
```
|
|
|
|
| 4 |
colorFrom: red
|
| 5 |
colorTo: yellow
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: 4.22.0
|
| 8 |
app_file: app.py
|
| 9 |
pinned: true
|
| 10 |
license: mit
|
|
|
|
| 26 |
## Citation
|
| 27 |
|
| 28 |
```bibtex
|
| 29 |
+
@article{liu2023one2345,
|
| 30 |
+
title={One-2-3-45: Any single image to 3d mesh in 45 seconds without per-shape optimization},
|
| 31 |
+
author={Liu, Minghua and Xu, Chao and Jin, Haian and Chen, Linghao and Varma T, Mukund and Xu, Zexiang and Su, Hao},
|
| 32 |
+
journal={Advances in Neural Information Processing Systems},
|
| 33 |
+
volume={36},
|
| 34 |
+
year={2024}
|
|
|
|
| 35 |
}
|
| 36 |
```
|
app.py
CHANGED
|
@@ -322,7 +322,7 @@ def stage1_run(models, device, cam_vis, tmp_dir,
|
|
| 322 |
return (90-elev_output, new_fig, *output_ims, *output_ims_2)
|
| 323 |
else:
|
| 324 |
rerun_idx = [i for i in range(len(btn_retrys)) if btn_retrys[i]]
|
| 325 |
-
if 90-int(elev
|
| 326 |
rerun_idx_in = [i if i < 4 else i+4 for i in rerun_idx]
|
| 327 |
else:
|
| 328 |
rerun_idx_in = rerun_idx
|
|
@@ -340,7 +340,7 @@ def stage1_run(models, device, cam_vis, tmp_dir,
|
|
| 340 |
|
| 341 |
def stage2_run(models, device, tmp_dir,
|
| 342 |
elev, scale, is_glb=False, rerun_all=[], stage2_steps=50):
|
| 343 |
-
flag_lower_cam = 90-int(elev
|
| 344 |
is_rerun = True if rerun_all else False
|
| 345 |
model = models['turncam'].half()
|
| 346 |
if not is_rerun:
|
|
@@ -593,8 +593,8 @@ def run_demo(
|
|
| 593 |
gr.Markdown(_DESCRIPTION)
|
| 594 |
|
| 595 |
with gr.Row(variant='panel'):
|
| 596 |
-
with gr.Column(scale=
|
| 597 |
-
image_block = gr.Image(type='pil', image_mode='RGBA', height=290, label='Input image'
|
| 598 |
|
| 599 |
gr.Examples(
|
| 600 |
examples=examples_full,
|
|
@@ -617,7 +617,7 @@ def run_demo(
|
|
| 617 |
run_btn = gr.Button('Run Generation', variant='primary', interactive=False)
|
| 618 |
guide_text = gr.Markdown(_USER_GUIDE, visible=True)
|
| 619 |
|
| 620 |
-
with gr.Column(scale=
|
| 621 |
with gr.Row():
|
| 622 |
bbox_block = gr.Image(type='pil', label="Bounding box", height=290, interactive=False)
|
| 623 |
sam_block = gr.Image(type='pil', label="SAM output", interactive=False)
|
|
|
|
| 322 |
return (90-elev_output, new_fig, *output_ims, *output_ims_2)
|
| 323 |
else:
|
| 324 |
rerun_idx = [i for i in range(len(btn_retrys)) if btn_retrys[i]]
|
| 325 |
+
if 90-int(elev) > 75:
|
| 326 |
rerun_idx_in = [i if i < 4 else i+4 for i in rerun_idx]
|
| 327 |
else:
|
| 328 |
rerun_idx_in = rerun_idx
|
|
|
|
| 340 |
|
| 341 |
def stage2_run(models, device, tmp_dir,
|
| 342 |
elev, scale, is_glb=False, rerun_all=[], stage2_steps=50):
|
| 343 |
+
flag_lower_cam = 90-int(elev) <= 75
|
| 344 |
is_rerun = True if rerun_all else False
|
| 345 |
model = models['turncam'].half()
|
| 346 |
if not is_rerun:
|
|
|
|
| 593 |
gr.Markdown(_DESCRIPTION)
|
| 594 |
|
| 595 |
with gr.Row(variant='panel'):
|
| 596 |
+
with gr.Column(scale=33):
|
| 597 |
+
image_block = gr.Image(type='pil', image_mode='RGBA', height=290, label='Input image')
|
| 598 |
|
| 599 |
gr.Examples(
|
| 600 |
examples=examples_full,
|
|
|
|
| 617 |
run_btn = gr.Button('Run Generation', variant='primary', interactive=False)
|
| 618 |
guide_text = gr.Markdown(_USER_GUIDE, visible=True)
|
| 619 |
|
| 620 |
+
with gr.Column(scale=20):
|
| 621 |
with gr.Row():
|
| 622 |
bbox_block = gr.Image(type='pil', label="Bounding box", height=290, interactive=False)
|
| 623 |
sam_block = gr.Image(type='pil', label="SAM output", interactive=False)
|