import os
import shutil
import tempfile

import gradio as gr
import numpy as np
import rembg
import spaces
import torch
from diffusers import (DiffusionPipeline, EulerAncestralDiscreteScheduler,
                       EulerDiscreteScheduler, StableDiffusionXLPipeline)
from einops import rearrange
from huggingface_hub import hf_hub_download
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything
from safetensors.torch import load_file
from torchvision.transforms import v2

from src.utils.camera_util import (FOV_to_intrinsics,
                                   get_circular_camera_poses,
                                   get_zero123plus_input_cameras)
from src.utils.infer_util import remove_background, resize_foreground
from src.utils.mesh_util import save_glb, save_obj
from src.utils.train_util import instantiate_from_config
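
# NOTE: `spaces` is imported but not used below; on a ZeroGPU Space the
# GPU-heavy entry points would typically be decorated with @spaces.GPU.


# Locate the local CUDA installation: prefer the CUDA_HOME / CUDA_PATH
# environment variables, then fall back to the directory two levels above
# the `nvcc` binary found on PATH.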
def find_cuda():
    cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
    if cuda_home and os.path.exists(cuda_home):
        return cuda_home

    nvcc_path = shutil.which('nvcc')
    if nvcc_path:
        # nvcc lives in <cuda_root>/bin/, so the root is two levels up.
        return os.path.dirname(os.path.dirname(nvcc_path))

    return None
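
# Build render cameras on a circular path around the object. FlexiCubes
# consumes full world-to-camera matrices; otherwise the renderer takes
# flattened extrinsics concatenated with flattened intrinsics (50-degree FOV).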
def get_render_cameras(batch_size=1, M=120, radius=2.5, elevation=10.0, is_flexicubes=False):
    c2ws = get_circular_camera_poses(M=M, radius=radius, elevation=elevation)
    if is_flexicubes:
        cameras = torch.linalg.inv(c2ws)
        cameras = cameras.unsqueeze(0).repeat(batch_size, 1, 1, 1)
    else:
        extrinsics = c2ws.flatten(-2)
        intrinsics = FOV_to_intrinsics(50.0).unsqueeze(0).repeat(M, 1, 1).float().flatten(-2)
        cameras = torch.cat([extrinsics, intrinsics], dim=-1)
        cameras = cameras.unsqueeze(0).repeat(batch_size, 1, 1)
    return cameras
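
# Guard used as the first step of the click chain so preprocessing only
# runs when an image is actually present.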
def check_input_image(input_image):
    if input_image is None:
        raise gr.Error("No image selected!")
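
# Optionally strip the background with rembg, then rescale the foreground to
# fill 85% of the frame so the object is centered for the multi-view model.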
def preprocess(input_image, do_remove_background):
    rembg_session = rembg.new_session() if do_remove_background else None
    if do_remove_background:
        input_image = remove_background(input_image, rembg_session)
        input_image = resize_foreground(input_image, 0.85)
    return input_image
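
# Run Zero123Plus on the preprocessed image. The pipeline returns one image
# containing six views in a 3-row x 2-column grid; for display it is re-tiled
# into a 2-row x 3-column grid.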
def generate_mvs(input_image, sample_steps, sample_seed):
    seed_everything(sample_seed)

    z123_image = pipeline(input_image, num_inference_steps=sample_steps).images[0]

    show_image = np.asarray(z123_image, dtype=np.uint8)
    show_image = torch.from_numpy(show_image)
    show_image = rearrange(show_image, '(n h) (m w) c -> (n m) h w c', n=3, m=2)
    show_image = rearrange(show_image, '(n m) h w c -> (n h) (m w) c', n=2, m=3)
    show_image = Image.fromarray(show_image.numpy())

    return z123_image, show_image
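
# Reconstruct a textured mesh from the six generated views: encode them into
# triplanes with the InstantMesh reconstruction model, extract the mesh, and
# export it as OBJ and GLB files in a temporary directory.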
def make3d(images):
    global model
    if IS_FLEXICUBES:
        model.init_flexicubes_geometry(device, use_renderer=False)
    model = model.eval()

    images = np.asarray(images, dtype=np.float32) / 255.0
    images = torch.from_numpy(images).permute(2, 0, 1).contiguous().float()
    # Split the 3x2 grid image into a batch of six views.
    images = rearrange(images, 'c (n h) (m w) -> (n m) c h w', n=3, m=2)

    input_cameras = get_zero123plus_input_cameras(batch_size=1, radius=4.0).to(device)
    # render_cameras is prepared here but unused in this mesh-only path.
    render_cameras = get_render_cameras(batch_size=1, radius=2.5, is_flexicubes=IS_FLEXICUBES).to(device)

    images = images.unsqueeze(0).to(device)
    # interpolation=3 maps to bicubic resampling.
    images = v2.functional.resize(images, (320, 320), interpolation=3, antialias=True).clamp(0, 1)

    mesh_fpath = tempfile.NamedTemporaryFile(suffix=".obj", delete=False).name
    print(mesh_fpath)
    mesh_basename = os.path.basename(mesh_fpath).split('.')[0]
    mesh_dirname = os.path.dirname(mesh_fpath)
    mesh_glb_fpath = os.path.join(mesh_dirname, f"{mesh_basename}.glb")

    with torch.no_grad():
        planes = model.forward_planes(images, input_cameras)
        mesh_out = model.extract_mesh(planes, use_texture_map=False, **infer_config)

        vertices, faces, vertex_colors = mesh_out
        # Reorder axes to match the viewer's coordinate convention.
        vertices = vertices[:, [1, 2, 0]]

        save_glb(vertices, faces, vertex_colors, mesh_glb_fpath)
        save_obj(vertices, faces, vertex_colors, mesh_fpath)
        print(f"Mesh saved to {mesh_fpath}")

    return mesh_fpath, mesh_glb_fpath
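
# Text-to-image with SDXL-Lightning: swap the 8-step distilled UNet into the
# SDXL pipeline and sample with a trailing-timestep Euler scheduler at
# guidance_scale=0, as the Lightning checkpoints expect. Note the UNet
# weights are re-loaded on every call; loading them once at startup would
# be cheaper.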
def generate_image(prompt):
    checkpoint = "sdxl_lightning_8step_unet.safetensors"
    num_inference_steps = 8

    pipe.scheduler = EulerDiscreteScheduler.from_config(
        pipe.scheduler.config, timestep_spacing="trailing")
    pipe.unet.load_state_dict(
        load_file(hf_hub_download(repo, checkpoint), device="cuda"))

    results = pipe(prompt, num_inference_steps=num_inference_steps, guidance_scale=0)
    return results.images[0]


# Configuration
cuda_path = find_cuda()
config_path = 'configs/instant-mesh-large.yaml'
config = OmegaConf.load(config_path)
config_name = os.path.basename(config_path).replace('.yaml', '')
model_config = config.model_config
infer_config = config.infer_config

# 'instant-mesh-*' configs use the FlexiCubes-based geometry.
IS_FLEXICUBES = config_name.startswith('instant-mesh')
device = torch.device('cuda')

# Load the Zero123Plus diffusion model
print('Loading diffusion model ...')
pipeline = DiffusionPipeline.from_pretrained(
    "sudo-ai/zero123plus-v1.2",
    custom_pipeline="zero123plus",
    torch_dtype=torch.float16,
)
pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(
    pipeline.scheduler.config, timestep_spacing='trailing'
)

# Swap in the fine-tuned UNet weights shipped with InstantMesh.
unet_ckpt_path = hf_hub_download(
    repo_id="TencentARC/InstantMesh", filename="diffusion_pytorch_model.bin", repo_type="model")
state_dict = torch.load(unet_ckpt_path, map_location='cpu')
pipeline.unet.load_state_dict(state_dict, strict=True)
pipeline = pipeline.to(device)

# Load the reconstruction model
print('Loading reconstruction model ...')
model_ckpt_path = hf_hub_download(
    repo_id="TencentARC/InstantMesh", filename="instant_mesh_large.ckpt", repo_type="model")
model = instantiate_from_config(model_config)
state_dict = torch.load(model_ckpt_path, map_location='cpu')['state_dict']
# Strip the 'lrm_generator.' prefix from the Lightning checkpoint keys and
# drop the cached source-camera buffers.
state_dict = {k[14:]: v for k, v in state_dict.items()
              if k.startswith('lrm_generator.') and 'source_camera' not in k}
model.load_state_dict(state_dict, strict=True)
model = model.to(device)

# Load the SDXL-Lightning text-to-image model
base = "stabilityai/stable-diffusion-xl-base-1.0"
repo = "ByteDance/SDXL-Lightning"
pipe = StableDiffusionXLPipeline.from_pretrained(
    base, torch_dtype=torch.float16, variant="fp16").to("cuda")

print('Loading Finished!')
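
# Gradio UI: the first tab generates an image from a text prompt and removes
# its background; the second tab turns the processed image into multi-views
# and then into a downloadable 3D mesh.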
with gr.Blocks() as demo:
    with gr.Group():
        with gr.Tab("Generate Image and Remove Background"):
            with gr.Row():
                prompt = gr.Textbox(label='Enter your prompt (English)', scale=2)
                submit_prompt = gr.Button('Generate Image', scale=1, variant='primary')
            img = gr.Image()
            with gr.Row(variant="panel"):
                with gr.Column():
                    with gr.Row():
                        input_image = gr.Image(
                            label="Input Image",
                            image_mode="RGBA",
                            sources="upload",
                            type="pil",
                            elem_id="content_image",
                        )
                        processed_image = gr.Image(
                            label="Processed Image",
                            image_mode="RGBA",
                            type="pil",
                            interactive=False,
                        )
                    with gr.Row():
                        with gr.Group():
                            do_remove_background = gr.Checkbox(label="Remove Background", value=True)
                    with gr.Row():
                        submit_process = gr.Button("Process Image", elem_id="process", variant="primary")
            with gr.Row(variant="panel"):
                gr.Examples(
                    examples=[os.path.join("examples", img_name)
                              for img_name in sorted(os.listdir("examples"))],
                    inputs=[input_image],
                    label="Examples",
                    cache_examples=False,
                    examples_per_page=16,
                )
        with gr.Tab("Generate 3D Model"):
            with gr.Column():
                with gr.Row():
                    with gr.Column():
                        mv_show_images = gr.Image(
                            label="Generated Multi-views",
                            type="pil",
                            width=379,
                            interactive=False,
                        )
                        with gr.Row():
                            with gr.Group():
                                sample_seed = gr.Number(value=42, label="Seed Value", precision=0)
                                sample_steps = gr.Slider(
                                    label="Sample Steps", minimum=30, maximum=75, value=75, step=5)
                        with gr.Row():
                            submit_mesh = gr.Button(
                                "Generate 3D Model", elem_id="generate", variant="primary")
                with gr.Row():
                    with gr.Tab("OBJ"):
                        output_model_obj = gr.Model3D(
                            label="Output Model (OBJ Format)",
                            interactive=False,
                        )
                        gr.Markdown(
                            "Note: the downloaded .obj model will be flipped. "
                            "Export the .glb instead, or flip the mesh manually before use.")
                    with gr.Tab("GLB"):
                        output_model_glb = gr.Model3D(
                            label="Output Model (GLB Format)",
                            interactive=False,
                        )
                        gr.Markdown(
                            "Note: the model shown here has a darker appearance. "
                            "Download it to get the correct result.")
                with gr.Row():
                    gr.Markdown(
                        "Try a different <b>seed value</b> if the result is unsatisfying (default: 42).")
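
    # mv_images carries the raw multi-view image between generate_mvs and
    # make3d. Event wiring: prompt -> image; input image -> processed image;
    # processed image -> multi-views -> OBJ/GLB mesh files.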
    mv_images = gr.State()

    submit_prompt.click(fn=generate_image, inputs=[prompt], outputs=img)
    submit_process.click(fn=check_input_image, inputs=[input_image]).success(
        fn=preprocess,
        inputs=[input_image, do_remove_background],
        outputs=[processed_image],
    )
    submit_mesh.click(
        fn=generate_mvs,
        inputs=[processed_image, sample_steps, sample_seed],
        outputs=[mv_images, mv_show_images],
    ).success(
        fn=make3d,
        inputs=[mv_images],
        outputs=[output_model_obj, output_model_glb],
    )

demo.launch()