Spaces:
Running on Zero
Running on Zero
Upload app.py with huggingface_hub
Browse files
app.py
CHANGED
|
@@ -420,60 +420,38 @@ def get_seed(randomize_seed: bool, seed: int) -> int:
|
|
| 420 |
return np.random.randint(0, MAX_SEED) if randomize_seed else seed
|
| 421 |
|
| 422 |
|
| 423 |
-
def
|
| 424 |
-
"""
|
| 425 |
-
images = [Image.open(p) for p in image_paths[:3]] # Use first 3 images
|
| 426 |
-
n = len(images)
|
| 427 |
-
|
| 428 |
-
# Resize each image to thumbnail size
|
| 429 |
-
thumbs = []
|
| 430 |
-
for img in images:
|
| 431 |
-
img = img.convert('RGBA')
|
| 432 |
-
img.thumbnail((thumb_size, thumb_size), Image.Resampling.LANCZOS)
|
| 433 |
-
thumbs.append(img)
|
| 434 |
-
|
| 435 |
-
# Create composite image
|
| 436 |
-
width = thumb_size * n + (n - 1) * 4 # 4px gap between images
|
| 437 |
-
height = thumb_size
|
| 438 |
-
composite = Image.new('RGBA', (width, height), (0, 0, 0, 0))
|
| 439 |
-
|
| 440 |
-
x = 0
|
| 441 |
-
for thumb in thumbs:
|
| 442 |
-
# Center vertically
|
| 443 |
-
y = (height - thumb.height) // 2
|
| 444 |
-
composite.paste(thumb, (x + (thumb_size - thumb.width) // 2, y), thumb)
|
| 445 |
-
x += thumb_size + 4
|
| 446 |
-
|
| 447 |
-
return composite
|
| 448 |
-
|
| 449 |
-
|
| 450 |
-
def prepare_examples() -> List[Tuple[List[str], Image.Image]]:
|
| 451 |
-
"""Prepare multi-image examples with thumbnails."""
|
| 452 |
example_dir = "assets/example_multi_image"
|
| 453 |
if not os.path.exists(example_dir):
|
| 454 |
return []
|
| 455 |
-
|
| 456 |
-
|
| 457 |
-
examples = []
|
| 458 |
for case in sorted(cases):
|
| 459 |
case_images = []
|
| 460 |
for i in range(1, 10):
|
| 461 |
img_path = f'{example_dir}/{case}_{i}.png'
|
| 462 |
if os.path.exists(img_path):
|
| 463 |
-
|
|
|
|
|
|
|
|
|
|
| 464 |
if case_images:
|
| 465 |
-
|
| 466 |
-
|
| 467 |
-
return examples
|
| 468 |
|
| 469 |
|
| 470 |
-
def
|
| 471 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 472 |
images = []
|
| 473 |
-
for
|
| 474 |
-
|
| 475 |
-
|
| 476 |
-
return images
|
| 477 |
|
| 478 |
|
| 479 |
@spaces.GPU(duration=120)
|
|
@@ -708,29 +686,20 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
|
|
| 708 |
gr.Markdown("*GLB extraction may take 30+ seconds.*")
|
| 709 |
|
| 710 |
with gr.Column(scale=1, min_width=200):
|
|
|
|
|
|
|
| 711 |
gr.Markdown("### Multi-View Examples")
|
| 712 |
-
|
| 713 |
-
|
| 714 |
-
|
| 715 |
-
|
| 716 |
-
|
| 717 |
-
|
| 718 |
-
|
| 719 |
-
|
| 720 |
-
container=False,
|
| 721 |
-
)
|
| 722 |
-
btn = gr.Button(f"{case_name} ({len(example_paths)} views)", size="sm")
|
| 723 |
-
example_buttons.append((btn, example_paths))
|
| 724 |
|
| 725 |
output_buf = gr.State()
|
| 726 |
|
| 727 |
-
# Connect example buttons
|
| 728 |
-
for btn, paths in example_buttons:
|
| 729 |
-
btn.click(
|
| 730 |
-
fn=lambda p=paths: load_example(p),
|
| 731 |
-
outputs=[image_prompt]
|
| 732 |
-
)
|
| 733 |
-
|
| 734 |
# Handlers
|
| 735 |
demo.load(start_session)
|
| 736 |
demo.unload(end_session)
|
|
|
|
| 420 |
return np.random.randint(0, MAX_SEED) if randomize_seed else seed
|
| 421 |
|
| 422 |
|
| 423 |
def prepare_multi_example() -> List[Image.Image]:
    """Build concatenated preview images for the gr.Examples gallery.

    Scans ``assets/example_multi_image`` for files named ``<case>_<i>.png``
    (``i`` in 1..9), resizes each view of a case to 512 px tall (aspect
    preserved), and concatenates the views side by side into one composite
    image per case.

    Returns:
        One composite PIL image per case, in sorted case order; an empty
        list when the asset directory does not exist.
    """
    example_dir = "assets/example_multi_image"
    if not os.path.exists(example_dir):
        return []
    # Case name is the prefix before the first underscore; a set
    # comprehension dedupes the per-view files down to unique cases.
    cases = {f.split('_')[0] for f in os.listdir(example_dir)
             if '_' in f and f.endswith('.png')}
    images = []
    for case in sorted(cases):
        case_images = []
        for i in range(1, 10):  # up to 9 views per case
            img_path = f'{example_dir}/{case}_{i}.png'
            if os.path.exists(img_path):
                img = Image.open(img_path)
                W, H = img.size
                # Normalize height to 512; max(1, ...) guards against a
                # zero-width resize for extremely tall inputs.
                img = img.resize((max(1, int(W / H * 512)), 512))
                case_images.append(np.array(img))
        if case_images:
            # Horizontal concat assumes every view shares the 512-px
            # height and the same channel count — TODO confirm all
            # example PNGs use one mode (e.g. RGBA).
            images.append(Image.fromarray(np.concatenate(case_images, axis=1)))
    return images
|
|
|
|
| 442 |
|
| 443 |
|
| 444 |
def split_image(image: Image.Image) -> List[Image.Image]:
    """Split a horizontally concatenated multi-view image into views.

    Columns whose alpha channel is fully transparent act as separators.
    Assumes an RGBA input (channel index 3 is read); the Examples
    component feeds this with ``image_mode="RGBA"``.

    Args:
        image: Concatenated RGBA image, views separated by transparent gaps.

    Returns:
        The individual views, each passed through ``preprocess_image``;
        empty when no opaque column exists.
    """
    arr = np.array(image)
    # Per-column occupancy: True where any pixel in the column is opaque.
    col_alpha = np.any(arr[..., 3] > 0, axis=0)
    # Pad with False on both sides so segments touching the left or right
    # edge are detected too — the unpadded transition scan missed a
    # segment that starts at column 0.
    padded = np.concatenate(([False], col_alpha, [False]))
    starts = np.where(~padded[:-1] & padded[1:])[0]          # first opaque column of each segment
    ends = np.where(padded[:-1] & ~padded[1:])[0] - 1        # last opaque column of each segment
    images = []
    for s, e in zip(starts.tolist(), ends.tolist()):
        images.append(Image.fromarray(arr[:, s:e + 1]))
    return [preprocess_image(img) for img in images]
|
|
|
|
| 455 |
|
| 456 |
|
| 457 |
@spaces.GPU(duration=120)
|
|
|
|
| 686 |
gr.Markdown("*GLB extraction may take 30+ seconds.*")
|
| 687 |
|
| 688 |
with gr.Column(scale=1, min_width=200):
|
| 689 |
+
# Hidden image for examples input
|
| 690 |
+
example_image = gr.Image(visible=False, type="pil", image_mode="RGBA")
|
| 691 |
gr.Markdown("### Multi-View Examples")
|
| 692 |
+
examples = gr.Examples(
|
| 693 |
+
examples=prepare_multi_example(),
|
| 694 |
+
inputs=[example_image],
|
| 695 |
+
fn=split_image,
|
| 696 |
+
outputs=[image_prompt],
|
| 697 |
+
run_on_click=True,
|
| 698 |
+
examples_per_page=12,
|
| 699 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 700 |
|
| 701 |
output_buf = gr.State()
|
| 702 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 703 |
# Handlers
|
| 704 |
demo.load(start_session)
|
| 705 |
demo.unload(end_session)
|