"""Image → 3D asset pipeline built on daggr.

Strips the background from a user-supplied image, downscales the cut-out to
keep inference cheap, then converts it into a 3D asset with TRELLIS.
"""

import uuid
from typing import Any

import gradio as gr
import numpy as np
from PIL import Image

from daggr import FnNode, GradioNode, InferenceNode, Graph
from daggr.state import get_daggr_files_dir


def downscale_image_to_file(image: Any, scale: float = 0.25) -> str | None:
    """Downscale *image* and save it as a PNG under the daggr files dir.

    Parameters
    ----------
    image:
        Anything ``PIL.Image.open`` accepts (filepath, file object, ...).
        ``None`` is tolerated and propagated.
    scale:
        Linear scale factor; clamped to the range [0.05, 1.0].

    Returns
    -------
    Path of the saved PNG as a string, or ``None`` when *image* is ``None``
    (e.g. the upstream node produced no output).
    """
    # Honor the declared ``str | None`` contract: the original body crashed
    # inside ``Image.open`` when the upstream image was missing.
    if image is None:
        return None

    pil_img = Image.open(image)
    scale_f = max(0.05, min(1.0, float(scale)))
    w, h = pil_img.size
    # Never let a dimension collapse to zero, even at extreme scales.
    new_w = max(1, int(w * scale_f))
    new_h = max(1, int(h * scale_f))
    resized = pil_img.resize((new_w, new_h), resample=Image.LANCZOS)
    out_path = get_daggr_files_dir() / f"{uuid.uuid4()}.png"
    resized.save(out_path)
    return str(out_path)


# Step 1: strip the background from the user-supplied image.
background_remover = GradioNode(
    "merve/background-removal",
    api_name="/image",
    run_locally=True,
    inputs={
        "image": gr.Image(),
    },
    outputs={
        "original_image": None,
        "final_image": gr.Image(label="Final Image"),
    },
)

# Step 2: shrink the cut-out so 3D inference runs on a smaller input.
downscaler = FnNode(
    downscale_image_to_file,
    name="Downscale image for Inference",
    inputs={
        "image": background_remover.final_image,
        "scale": gr.Slider(
            label="Downscale factor",
            minimum=0.25,
            maximum=0.75,
            step=0.05,
            value=0.25,
        ),
    },
    outputs={
        "image": gr.Image(label="Downscaled Image", type="filepath"),
    },
)

# Step 3: image → 3D conversion via TRELLIS.
trellis_3d = GradioNode(
    "microsoft/TRELLIS.2",
    api_name="/image_to_3d",
    inputs={
        "image": downscaler.image,
        "ss_guidance_strength": 7.5,
        "ss_sampling_steps": 12,
    },
    outputs={
        "glb": gr.HTML(label="3D Asset (GLB preview)"),
    },
)

# TODO(review): work-in-progress alternative 3D backend — no inputs are
# wired up and it is not part of the graph below. Finish wiring it or
# remove it.
sam3d_obj = GradioNode(
    "HorizonRobotics/EmbodiedGen-Image-to-3D",
    api_name="/extract_3d_representations_v3",
    inputs=[],
)

graph = Graph(
    name="Image to 3D Asset Pipeline",
    nodes=[background_remover, downscaler, trellis_3d],
)

# NOTE(review): the triple-quoted blocks below are inert reference
# transcripts of gradio_client calls — they are never executed (no-op
# expression statements). Kept as developer notes; consider moving them
# into a docs/ file. They use ``true`` (lowercase) and ``handle_file``
# as recorded, so they would NOT run as-is.
'''
from gradio_client import Client, file

client = Client("HorizonRobotics/EmbodiedGen-Image-to-3D")
client.predict(
    enable_delight=None,
    texture_size=true,
    api_name="/extract_3d_representations_v3"
)
client.predict(
    api_name="/lambda_4"
)
client.predict(
    gs_path="/home/user/app/sessions/imageto3d/29hqqc189st/sample_gs_aligned.ply",
    mesh_obj_path="/home/user/app/sessions/imageto3d/29hqqc189st/sample.obj",
    asset_cat_text="",
    height_range_text="",
    mass_range_text="",
    asset_version_text="",
    api_name="/extract_urdf"
)
client.predict(
    api_name="/lambda_5"
)
'''

'''
from gradio_client import Client, file

client = Client("HorizonRobotics/EmbodiedGen-Image-to-3D")
client.predict(
    api_name="/lambda_2"
)
client.predict(
    content=handle_file('https://horizonrobotics-embodiedgen-image-to-3d.hf.space/gradio_api/file=/tmp/gradio/1219da499ed7b9468eca3ab819eb09a47479748a66a61f8608006b92a4a635a7/chairelect.png'),
    api_name="/active_btn_by_content"
)
client.predict(
    image=handle_file('https://horizonrobotics-embodiedgen-image-to-3d.hf.space/gradio_api/file=/tmp/gradio/1219da499ed7b9468eca3ab819eb09a47479748a66a61f8608006b92a4a635a7/chairelect.png'),
    rmbg_tag="rembg",
    api_name="/preprocess_image_fn"
)
client.predict(
    api_name="/lambda_2"
)
client.predict(
    content=handle_file('https://horizonrobotics-embodiedgen-image-to-3d.hf.space/gradio_api/file=/tmp/gradio/f0b1343c3d64f50b7a08ce3027056ba9259d96960e58625a1df07922e4a3a3f4/image.png'),
    api_name="/active_btn_by_content"
)
client.predict(
    randomize_seed=False,
    seed=0,
    api_name="/get_seed"
)
client.predict(
    image=handle_file('https://horizonrobotics-embodiedgen-image-to-3d.hf.space/gradio_api/file=/tmp/gradio/f0b1343c3d64f50b7a08ce3027056ba9259d96960e58625a1df07922e4a3a3f4/image.png'),
    seed=0,
    ss_sampling_steps=25,
    slat_sampling_steps=25,
    raw_image_cache=handle_file('https://horizonrobotics-embodiedgen-image-to-3d.hf.space/gradio_api/file=/tmp/gradio/a7f55099fbfd47c44667d5e3eeee8818bf41ab1a5a70fc9bed2d5ce3c68f7015/image.png'),
    ss_guidance_strength=7.5,
    slat_guidance_strength=3,
    sam_image=None,
    api_name="/image_to_3d"
)
client.predict(
    enable_delight=None,
    texture_size=true,
    api_name="/extract_3d_representations_v3"
)
client.predict(
    api_name="/lambda_4"
)
client.predict(
    gs_path="/home/user/app/sessions/imageto3d/1kxl1n8ek38/sample_gs_aligned.ply",
    mesh_obj_path="/home/user/app/sessions/imageto3d/1kxl1n8ek38/sample.obj",
    asset_cat_text="chair",
    height_range_text="0.5",
    mass_range_text="6",
    asset_version_text="0.0.1",
    api_name="/extract_urdf"
)
client.predict(
    gs_path="/home/user/app/sessions/imageto3d/1kxl1n8ek38/sample_gs_aligned.ply",
    mesh_obj_path="/home/user/app/sessions/imageto3d/1kxl1n8ek38/sample.obj",
    asset_cat_text="chair",
    height_range_text="0.5-0.7",
    mass_range_text="2.1-3.5",
    asset_version_text="v0.0.1",
    api_name="/extract_urdf"
)
client.predict(
    api_name="/lambda_5"
)
'''

'''
from gradio_client import Client, file

client = Client("prithivMLmods/Z-Image-Turbo-LoRA-DLC")
client.predict(
    width=1024,
    height=1024,
    api_name="/update_selection"
)
client.predict(
    prompt="Pull a purple plumb out ya butt",
    image_input=None,
    image_strength=0.75,
    cfg_scale=0,
    steps=9,
    randomize_seed=None,
    seed=true,
    width=256386538,
    height=1024,
    lora_scale=1024,
    api_name="/run_lora"
)
'''

if __name__ == "__main__":
    graph.launch()