Spaces:
Sleeping
Sleeping
add separate boxes for prompts
Browse files
app.py
CHANGED
|
@@ -22,7 +22,7 @@ luma_client = LumaAI(auth_token=LUMA_API_KEY)
|
|
| 22 |
# PHASE 1: GPU DRAWING (ZeroGPU)
|
| 23 |
# ==========================================
|
| 24 |
@spaces.GPU(duration=60)
|
| 25 |
-
def generate_frames(sketch_1, sketch_2, prompt, ctrl_scale, consistency):
|
| 26 |
    print("🎨 Initializing GPU Pipelines...")
|
| 27 |
dtype = torch.float16
|
| 28 |
controlnet = ControlNetModel.from_pretrained("xinsir/controlnet-scribble-sdxl-1.0", torch_dtype=dtype)
|
|
@@ -37,7 +37,7 @@ def generate_frames(sketch_1, sketch_2, prompt, ctrl_scale, consistency):
|
|
| 37 |
# --- Start Frame ---
|
| 38 |
s1 = sketch_1.convert("RGB").resize((1024, 1024), Image.NEAREST)
|
| 39 |
img_1 = pipe(
|
| 40 |
-
        prompt=f"stunning digital illustration, {prompt}",
|
| 41 |
image=s1,
|
| 42 |
controlnet_conditioning_scale=float(ctrl_scale)
|
| 43 |
).images[0]
|
|
@@ -47,7 +47,7 @@ def generate_frames(sketch_1, sketch_2, prompt, ctrl_scale, consistency):
|
|
| 47 |
i2i_pipe = StableDiffusionXLControlNetImg2ImgPipeline(**pipe.components).to("cuda")
|
| 48 |
s2 = sketch_2.convert("RGB").resize((1024, 1024), Image.NEAREST)
|
| 49 |
img_2 = i2i_pipe(
|
| 50 |
-
        prompt=f"stunning digital illustration, {prompt}",
|
| 51 |
image=img_1,
|
| 52 |
control_image=s2,
|
| 53 |
strength=float(consistency)
|
|
@@ -68,12 +68,12 @@ def upload_to_web(image):
|
|
| 68 |
)
|
| 69 |
return response.json()['data']['url']
|
| 70 |
|
| 71 |
-
def master_pipeline(sketch_1, sketch_2, prompt, ctrl_scale, consistency):
|
| 72 |
if not LUMA_API_KEY or not IMGBB_API_KEY:
|
| 73 |
        raise gr.Error("🔑 API Keys missing! Add them to Space Secrets.")
|
| 74 |
-
|
| 75 |
# 1. Generate Images on ZeroGPU
|
| 76 |
-
    img_1, img_2 = generate_frames(sketch_1, sketch_2, prompt, ctrl_scale, consistency)
|
| 77 |
|
| 78 |
# 2. Upload to ImgBB (CPU)
|
| 79 |
    print("☁️ Uploading to cloud...")
|
|
@@ -83,7 +83,7 @@ def master_pipeline(sketch_1, sketch_2, prompt, ctrl_scale, consistency):
|
|
| 83 |
# 3. Request Luma Video (CPU)
|
| 84 |
    print("🎬 Requesting Luma Ray-2 Animation...")
|
| 85 |
generation = luma_client.generations.create(
|
| 86 |
-
        prompt=f"Cinematic stylized animation, {prompt}",
|
| 87 |
model="ray-2",
|
| 88 |
duration="3s",
|
| 89 |
resolution="540p",
|
|
@@ -110,21 +110,36 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
| 110 |
gr.Markdown("Transform sketches into professional animations using local ZeroGPU stylization and Luma cloud rendering.")
|
| 111 |
|
| 112 |
with gr.Row():
|
|
|
|
| 113 |
with gr.Column():
|
| 114 |
s1 = gr.Image(type="pil", label="Start Sketch (Black & White)")
|
| 115 |
s2 = gr.Image(type="pil", label="End Sketch (Black & White)")
|
| 116 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 117 |
with gr.Accordion("Settings", open=False):
|
| 118 |
sc = gr.Slider(0, 2, 0.6, label="Sketch Strictness")
|
| 119 |
cs = gr.Slider(0.5, 1, 0.85, label="Color Consistency")
|
|
|
|
| 120 |
btn = gr.Button("Generate Animation", variant="primary")
|
| 121 |
|
|
|
|
| 122 |
with gr.Column():
|
| 123 |
-
|
| 124 |
with gr.Row():
|
| 125 |
o1 = gr.Image(label="Start Frame")
|
| 126 |
o2 = gr.Image(label="End Frame")
|
|
|
|
|
|
|
|
|
|
| 127 |
|
| 128 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 129 |
|
| 130 |
demo.launch()
|
|
|
|
| 22 |
# PHASE 1: GPU DRAWING (ZeroGPU)
|
| 23 |
# ==========================================
|
| 24 |
@spaces.GPU(duration=60)
|
| 25 |
+
def generate_frames(sketch_1, sketch_2, image_prompt, ctrl_scale, consistency):
|
| 26 |
    print("🎨 Initializing GPU Pipelines...")
|
| 27 |
dtype = torch.float16
|
| 28 |
controlnet = ControlNetModel.from_pretrained("xinsir/controlnet-scribble-sdxl-1.0", torch_dtype=dtype)
|
|
|
|
| 37 |
# --- Start Frame ---
|
| 38 |
s1 = sketch_1.convert("RGB").resize((1024, 1024), Image.NEAREST)
|
| 39 |
img_1 = pipe(
|
| 40 |
+
prompt=f"stunning digital illustration, {image_prompt}",
|
| 41 |
image=s1,
|
| 42 |
controlnet_conditioning_scale=float(ctrl_scale)
|
| 43 |
).images[0]
|
|
|
|
| 47 |
i2i_pipe = StableDiffusionXLControlNetImg2ImgPipeline(**pipe.components).to("cuda")
|
| 48 |
s2 = sketch_2.convert("RGB").resize((1024, 1024), Image.NEAREST)
|
| 49 |
img_2 = i2i_pipe(
|
| 50 |
+
prompt=f"stunning digital illustration, {image_prompt}",
|
| 51 |
image=img_1,
|
| 52 |
control_image=s2,
|
| 53 |
strength=float(consistency)
|
|
|
|
| 68 |
)
|
| 69 |
return response.json()['data']['url']
|
| 70 |
|
| 71 |
+
def master_pipeline(sketch_1, sketch_2, image_prompt, video_prompt, ctrl_scale, consistency):
|
| 72 |
if not LUMA_API_KEY or not IMGBB_API_KEY:
|
| 73 |
        raise gr.Error("🔑 API Keys missing! Add them to Space Secrets.")
|
| 74 |
+
|
| 75 |
# 1. Generate Images on ZeroGPU
|
| 76 |
+
img_1, img_2 = generate_frames(sketch_1, sketch_2, image_prompt, ctrl_scale, consistency)
|
| 77 |
|
| 78 |
# 2. Upload to ImgBB (CPU)
|
| 79 |
    print("☁️ Uploading to cloud...")
|
|
|
|
| 83 |
# 3. Request Luma Video (CPU)
|
| 84 |
    print("🎬 Requesting Luma Ray-2 Animation...")
|
| 85 |
generation = luma_client.generations.create(
|
| 86 |
+
prompt=f"Cinematic stylized animation, {video_prompt}",
|
| 87 |
model="ray-2",
|
| 88 |
duration="3s",
|
| 89 |
resolution="540p",
|
|
|
|
| 110 |
gr.Markdown("Transform sketches into professional animations using local ZeroGPU stylization and Luma cloud rendering.")
|
| 111 |
|
| 112 |
with gr.Row():
|
| 113 |
+
# --- INPUT COLUMN ---
|
| 114 |
with gr.Column():
|
| 115 |
s1 = gr.Image(type="pil", label="Start Sketch (Black & White)")
|
| 116 |
s2 = gr.Image(type="pil", label="End Sketch (Black & White)")
|
| 117 |
+
|
| 118 |
+
# Separated Prompts
|
| 119 |
+
img_prompt = gr.Textbox(label="Image Prompt", placeholder="e.g., Plague doctor, bird mask, cinematic lighting")
|
| 120 |
+
vid_prompt = gr.Textbox(label="Video Prompt", placeholder="e.g., The plague doctor's mask snaps open and closed, chanting")
|
| 121 |
+
|
| 122 |
with gr.Accordion("Settings", open=False):
|
| 123 |
sc = gr.Slider(0, 2, 0.6, label="Sketch Strictness")
|
| 124 |
cs = gr.Slider(0.5, 1, 0.85, label="Color Consistency")
|
| 125 |
+
|
| 126 |
btn = gr.Button("Generate Animation", variant="primary")
|
| 127 |
|
| 128 |
+
# --- OUTPUT COLUMN ---
|
| 129 |
with gr.Column():
|
| 130 |
+
# Frames moved to the top
|
| 131 |
with gr.Row():
|
| 132 |
o1 = gr.Image(label="Start Frame")
|
| 133 |
o2 = gr.Image(label="End Frame")
|
| 134 |
+
|
| 135 |
+
# Video moved below the frames
|
| 136 |
+
vid = gr.Video(label="Final Video")
|
| 137 |
|
| 138 |
+
# Updated click event to include both prompt inputs
|
| 139 |
+
btn.click(
|
| 140 |
+
fn=master_pipeline,
|
| 141 |
+
inputs=[s1, s2, img_prompt, vid_prompt, sc, cs],
|
| 142 |
+
outputs=[o1, o2, vid]
|
| 143 |
+
)
|
| 144 |
|
| 145 |
demo.launch()
|