"""Asset2Scene Gradio app.

Generate individual 3D assets with Shap-E, arrange their rendered views on a
2D canvas, then render a final image with Stable Diffusion guided by a
ControlNet depth map estimated from the canvas composition.
"""
from pathlib import Path
import gradio as gr
from canvas_editor import (
HTML_TEMPLATE,
CSS_TEMPLATE,
JS_ON_LOAD,
payload_append,
payload_reset,
empty_canvas_payload,
)
from shape_e_service import (
DEVICE,
clear_saved_assets,
generate_and_add_asset,
next_view,
prev_view,
select_asset,
selected_view_path,
set_prompt,
)
from sd_service import generate_with_depth_from_scene
# Resolve paths relative to this file so the app works regardless of the CWD.
ROOT_DIR = Path(__file__).resolve().parent
# Storage for generated asset views / scene exports; created eagerly and
# exposed to the Gradio server via `allowed_paths` at launch time.
DATA_DIR = ROOT_DIR / "data"
DATA_DIR.mkdir(parents=True, exist_ok=True)
CSS = """
#title-wrap {
text-align: center;
margin-bottom: 14px;
}
#subtitle {
color: #9ca3af;
font-size: 15px;
max-width: 900px;
margin: 0 auto;
line-height: 1.45;
}
#device-badge {
display: inline-block;
margin-top: 10px;
padding: 6px 10px;
border-radius: 999px;
background: #111827;
border: 1px solid #374151;
font-size: 13px;
}
#scene_png_data {
display: none;
}
.story-box {
border: 1px solid #374151;
border-radius: 12px;
padding: 14px 16px;
background: #0f172a;
margin-bottom: 14px;
}
.story-box h3 {
margin: 0 0 8px 0;
font-size: 16px;
}
.story-box p {
margin: 0;
color: #cbd5e1;
line-height: 1.5;
}
"""
# Prompt presets wired to the "quick asset ideas" buttons in the UI.
ASSET_PROMPTS = {
    "Princess": "A stylized fantasy princess figurine in a long dress",
    "Dragon": "A small fantasy dragon figurine with wings",
    "Medieval house": "A low-poly medieval house",
    "Forest tree": "A stylized fantasy forest tree",
    "Treasure chest": "A fantasy treasure chest with gold trim",
    "Stone well": "A medieval stone well",
}
# Initial value shown in the Shap-E asset prompt textbox.
DEFAULT_ASSET_PROMPT = ASSET_PROMPTS["Princess"]
# Default positive prompt for the Stable Diffusion scene render.
DEFAULT_SD_PROMPT = (
    "A fantasy storybook scene: a princess in a forest near a medieval house, "
    "with a small dragon nearby, cinematic lighting, detailed environment, "
    "cozy magical atmosphere, highly detailed"
)
# Default negative prompt used to suppress common SD failure modes.
DEFAULT_NEGATIVE_PROMPT = (
    "low quality, blurry, cropped, deformed, duplicate objects, floating objects, "
    "extra limbs, bad anatomy, text, watermark, white background"
)
def add_selected_view_to_canvas(saved_assets, selected_asset_index):
    """Append the currently selected asset view to the canvas.

    Raises:
        gr.Error: If no asset view is currently selected.
    """
    view_path = selected_view_path(saved_assets, selected_asset_index)
    if view_path:
        return payload_append([view_path])
    raise gr.Error("Select an asset first.")
def reset_canvas_with_selected_view(saved_assets, selected_asset_index):
    """Replace the entire canvas contents with the selected asset view.

    Raises:
        gr.Error: If no asset view is currently selected.
    """
    view_path = selected_view_path(saved_assets, selected_asset_index)
    if view_path:
        return payload_reset([view_path])
    raise gr.Error("Select an asset first.")
def clear_canvas():
    """Produce the payload that resets the canvas to an empty scene."""
    return empty_canvas_payload()
# ---------------------------------------------------------------------------
# UI definition
# ---------------------------------------------------------------------------
# FIX: `css` is a `gr.Blocks` constructor argument. `Blocks.launch()` does not
# accept a `css` keyword, so passing it there (as the original `__main__`
# guard did) raises a TypeError at startup. The stylesheet is supplied here.
with gr.Blocks(title="Asset2Scene", css=CSS) as demo:
    # Cross-callback state: the list of generated assets, and the gallery
    # index of the currently selected asset (None until a selection exists).
    saved_assets_state = gr.State([])
    selected_asset_index_state = gr.State(None)

    # Hidden field: canvas JS writes exported PNG composition here as data URL
    # (kept invisible via the `#scene_png_data { display: none; }` CSS rule).
    scene_png_data = gr.Textbox(elem_id="scene_png_data", value="", lines=1)

    # Page header: title, short pipeline explanation, and the active device.
    gr.HTML(
        f"""
        <div id="title-wrap">
            <h1>Asset2Scene</h1>
            <div id="subtitle">
                Build a scene from separate Shap-E assets, place them on a canvas,
                then use that composition only to estimate depth. Stable Diffusion
                receives your text prompt plus the depth map as ControlNet guidance,
                so it follows the spatial idea without being forced to copy the raw collage literally.
            </div>
            <div id="device-badge">Device: {DEVICE}</div>
        </div>
        """
    )
    gr.HTML(
        """
        <div class="story-box">
            <h3>Suggested story</h3>
            <p>
                Try building a simple fairy-tale setup: a princess in a forest near a medieval house,
                with a dragon nearby. Generate the pieces one by one, place them on the canvas,
                and then let SD reinterpret the layout into a richer final image.
            </p>
        </div>
        """
    )

    with gr.Row():
        # Left column: Shap-E asset generation controls.
        with gr.Column(scale=1):
            prompt = gr.Textbox(
                label="Asset prompt",
                lines=3,
                value=DEFAULT_ASSET_PROMPT,
                placeholder="Describe one object or character to generate as a Shap-E asset...",
            )
            gr.Markdown("**Quick asset ideas**")
            with gr.Row():
                princess_btn = gr.Button("Princess")
                dragon_btn = gr.Button("Dragon")
                house_btn = gr.Button("House")
            with gr.Row():
                tree_btn = gr.Button("Tree")
                chest_btn = gr.Button("Chest")
                well_btn = gr.Button("Well")
            with gr.Row():
                # Lower defaults on CPU keep generation times tolerable.
                steps = gr.Slider(
                    minimum=8,
                    maximum=64,
                    step=1,
                    value=24 if DEVICE == "cpu" else 32,
                    label="Shap-E steps",
                )
                guidance_scale = gr.Slider(
                    minimum=1.0,
                    maximum=20.0,
                    step=0.5,
                    value=12.0,
                    label="Shap-E guidance",
                )
            with gr.Row():
                frame_size = gr.Slider(
                    minimum=64,
                    maximum=256,
                    step=32,
                    value=64 if DEVICE == "cpu" else 256,
                    label="Frame size",
                )
                seed = gr.Number(
                    label="Shap-E seed",
                    value=42,
                    precision=0,
                )
            with gr.Row():
                generate_btn = gr.Button("Generate asset and add to gallery", variant="primary")
                clear_saved_btn = gr.Button("Clear gallery")
            gr.Markdown(
                "Generate one object at a time, pick the best view, and send it to the canvas."
            )

        # Right column: view browser for the selected asset, plus the gallery.
        with gr.Column(scale=1):
            current_view = gr.Image(label="Selected asset view", type="filepath")
            view_text = gr.Markdown("No asset selected.")
            with gr.Row():
                prev_btn = gr.Button("← Prev view", interactive=False)
                next_btn = gr.Button("Next view →", interactive=False)
            with gr.Row():
                add_to_canvas_btn = gr.Button("Add selected view to canvas", variant="primary")
                reset_canvas_btn = gr.Button("Reset canvas with selected view")
            saved_gallery = gr.Gallery(
                label="Saved assets",
                columns=3,
                height="auto",
                preview=False,
            )

    gr.Markdown("## Scene canvas")
    gr.Markdown(
        "Place the generated assets here. Move them around, resize them, and build the rough composition of the scene."
    )
    # NOTE(review): `html_template` / `css_template` / `js_on_load` are not
    # standard `gr.HTML` kwargs — this presumably relies on a patched or
    # custom Gradio component; confirm against the installed Gradio version.
    editor = gr.HTML(
        value='{"render_id": null, "mode": "append", "items": []}',
        html_template=HTML_TEMPLATE,
        css_template=CSS_TEMPLATE,
        js_on_load=JS_ON_LOAD,
    )
    clear_canvas_btn = gr.Button("Clear canvas")

    gr.Markdown("## Stable Diffusion + ControlNet depth")
    gr.Markdown(
        "The canvas composition is used only to estimate depth. SD receives your text prompt and the depth map, so it can reinterpret the scene more freely."
    )
    sd_prompt = gr.Textbox(
        label="Scene prompt for SD",
        lines=4,
        value=DEFAULT_SD_PROMPT,
    )
    sd_negative_prompt = gr.Textbox(
        label="Negative prompt",
        lines=2,
        value=DEFAULT_NEGATIVE_PROMPT,
    )
    with gr.Row():
        sd_steps = gr.Slider(
            label="SD steps",
            minimum=10,
            maximum=50,
            step=1,
            value=30,
        )
        sd_guidance = gr.Slider(
            label="Guidance scale",
            minimum=1.0,
            maximum=12.0,
            step=0.5,
            value=7.5,
        )
        controlnet_scale = gr.Slider(
            label="Depth control scale",
            minimum=0.1,
            maximum=2.0,
            step=0.1,
            value=0.9,
        )
    sd_seed = gr.Number(label="SD seed", value=1234, precision=0)
    render_sd_btn = gr.Button("Render scene with SD + depth", variant="primary")
    with gr.Row():
        scene_preview = gr.Image(label="Canvas composition used for depth", type="filepath")
        depth_preview = gr.Image(label="Depth map used by ControlNet", type="filepath")
        sd_result = gr.Image(label="Final SD result", type="filepath")

    # ------------------------------------------------------------------
    # Event wiring
    # ------------------------------------------------------------------
    # Quick asset buttons: each fills the asset prompt with a preset.
    princess_btn.click(fn=lambda: set_prompt(ASSET_PROMPTS["Princess"]), outputs=prompt)
    dragon_btn.click(fn=lambda: set_prompt(ASSET_PROMPTS["Dragon"]), outputs=prompt)
    house_btn.click(fn=lambda: set_prompt(ASSET_PROMPTS["Medieval house"]), outputs=prompt)
    tree_btn.click(fn=lambda: set_prompt(ASSET_PROMPTS["Forest tree"]), outputs=prompt)
    chest_btn.click(fn=lambda: set_prompt(ASSET_PROMPTS["Treasure chest"]), outputs=prompt)
    well_btn.click(fn=lambda: set_prompt(ASSET_PROMPTS["Stone well"]), outputs=prompt)

    # Shap-E generation: appends the new asset to the gallery and selects it.
    generate_btn.click(
        fn=generate_and_add_asset,
        inputs=[prompt, steps, guidance_scale, frame_size, seed, saved_assets_state],
        outputs=[
            saved_assets_state,
            selected_asset_index_state,
            saved_gallery,
            current_view,
            view_text,
            prev_btn,
            next_btn,
        ],
    )

    # Gallery selection updates the selected index and the view browser.
    saved_gallery.select(
        fn=select_asset,
        inputs=[saved_assets_state],
        outputs=[
            selected_asset_index_state,
            saved_gallery,
            current_view,
            view_text,
            prev_btn,
            next_btn,
        ],
    )

    # Cycle through the rendered views of the selected asset.
    prev_btn.click(
        fn=prev_view,
        inputs=[saved_assets_state, selected_asset_index_state],
        outputs=[
            saved_assets_state,
            saved_gallery,
            current_view,
            view_text,
            prev_btn,
            next_btn,
        ],
    )
    next_btn.click(
        fn=next_view,
        inputs=[saved_assets_state, selected_asset_index_state],
        outputs=[
            saved_assets_state,
            saved_gallery,
            current_view,
            view_text,
            prev_btn,
            next_btn,
        ],
    )

    # Clear gallery and all selection state.
    clear_saved_btn.click(
        fn=clear_saved_assets,
        outputs=[
            saved_assets_state,
            selected_asset_index_state,
            saved_gallery,
            current_view,
            view_text,
            prev_btn,
            next_btn,
        ],
    )

    # Canvas actions: append, reset-with-selection, and clear.
    add_to_canvas_btn.click(
        fn=add_selected_view_to_canvas,
        inputs=[saved_assets_state, selected_asset_index_state],
        outputs=editor,
    )
    reset_canvas_btn.click(
        fn=reset_canvas_with_selected_view,
        inputs=[saved_assets_state, selected_asset_index_state],
        outputs=editor,
    )
    clear_canvas_btn.click(
        fn=clear_canvas,
        outputs=editor,
    )

    # SD render: prompt + depth only (the collage itself is never fed to SD).
    render_sd_btn.click(
        fn=generate_with_depth_from_scene,
        inputs=[
            scene_png_data,
            sd_prompt,
            sd_negative_prompt,
            sd_steps,
            sd_guidance,
            controlnet_scale,
            sd_seed,
        ],
        outputs=[scene_preview, depth_preview, sd_result],
    )

if __name__ == "__main__":
    # `allowed_paths` lets the frontend fetch generated images under data/.
    demo.launch(
        allowed_paths=[str(DATA_DIR)],
    )