ICGenAIShare04 commited on
Commit
d2d418c
·
1 Parent(s): 3175b6a

fix tooncrafter api call

Browse files
Files changed (1) hide show
  1. app.py +27 -23
app.py CHANGED
@@ -3,7 +3,6 @@ import spaces
3
  import torch
4
  from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel
5
  from gradio_client import Client, handle_file
6
- import os
7
 
8
  # ==========================================
9
  # 1. LAZY LOAD LOCAL CARTOON MODELS
@@ -19,7 +18,6 @@ def load_cartoon_models():
19
  "xinsir/controlnet-scribble-sdxl-1.0",
20
  torch_dtype=dtype
21
  )
22
- # Animagine XL 3.1 is the premier model for 2D animation/cartoon styles
23
  image_pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
24
  "cagliostrolab/animagine-xl-3.1",
25
  controlnet=controlnet,
@@ -35,7 +33,6 @@ def generate_cartoons(sketch_1, sketch_2, user_prompt, ctrl_scale):
35
  load_cartoon_models()
36
  image_pipe.to("cuda")
37
 
38
- # Animagine responds best to these specific quality tags
39
  master_prompt = f"{user_prompt}, masterpiece, best quality, highly detailed, professional 2d animation, flat colors, anime style"
40
  neg_prompt = "nsfw, photorealistic, 3d render, ugly, messy lines, bad anatomy, bad hands, missing fingers, lowres, worst quality"
41
 
@@ -59,9 +56,6 @@ def generate_cartoons(sketch_1, sketch_2, user_prompt, ctrl_scale):
59
  controlnet_conditioning_scale=ctrl_scale
60
  ).images[0]
61
 
62
- image_pipe.to("cpu")
63
-
64
- # Save temporarily to pass to the ToonCrafter API
65
  img_1_path, img_2_path = "frame1.png", "frame2.png"
66
  img_1.save(img_1_path)
67
  img_2.save(img_2_path)
@@ -72,24 +66,32 @@ def generate_cartoons(sketch_1, sketch_2, user_prompt, ctrl_scale):
72
  # 3. REMOTE API CALL (Cartoon -> Video)
73
  # ==========================================
74
  def run_tooncrafter(img_1_path, img_2_path, prompt):
75
- print("🎬 Sending frames to official ToonCrafter Space...")
 
 
76
  try:
77
- # Connect silently to the free ToonCrafter HF Space
78
- client = Client("Doubiiu/tooncrafter")
79
  result = client.predict(
80
- image0=handle_file(img_1_path),
81
- image1=handle_file(img_2_path),
82
  prompt=prompt,
83
- seed=123,
84
- eta=1.0,
85
  cfg_scale=7.5,
86
- steps=15,
87
- frame_num=16,
88
- api_name="/predict"
 
 
89
  )
90
- print("βœ… Animation Complete!")
91
- # The result returns a tuple, the video path is the second item [1]
92
- return result[1]
 
 
 
 
 
 
93
  except Exception as e:
94
  raise gr.Error(f"ToonCrafter API Error: {str(e)}")
95
 
@@ -97,17 +99,19 @@ def run_tooncrafter(img_1_path, img_2_path, prompt):
97
  # 4. MASTER PIPELINE CONTROLLER
98
  # ==========================================
99
  def process_full_animation(sketch_1, sketch_2, prompt, ctrl_scale):
100
- # Step 1: Run local ZeroGPU
101
  img_1_path, img_2_path = generate_cartoons(sketch_1, sketch_2, prompt, ctrl_scale)
102
- # Step 2: Pass to ToonCrafter
 
103
  video_path = run_tooncrafter(img_1_path, img_2_path, prompt)
 
104
  return img_1_path, img_2_path, video_path
105
 
106
  # ==========================================
107
  # 5. GRADIO INTERFACE
108
  # ==========================================
109
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
110
- gr.Markdown("# ✏️ Sketch-to-ToonCrafter Animation Studio")
111
  gr.Markdown("Upload two sketches. We use **Animagine XL 3.1** locally to turn them into professional cartoons, and pass them to **ToonCrafter** to animate the motion!")
112
 
113
  with gr.Row():
@@ -116,7 +120,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
116
  sketch_2 = gr.Image(type="pil", label="End Sketch (Black & White lines)", image_mode="RGB")
117
  prompt = gr.Textbox(
118
  label="Character & Motion Description",
119
- placeholder="e.g., A boy in a red shirt and blue jeans jumping"
120
  )
121
  ctrl_scale = gr.Slider(minimum=0.0, maximum=2.0, value=1.0, step=0.05, label="Sketch Adherence Strength")
122
  generate_btn = gr.Button("Create Animation", variant="primary")
 
3
  import torch
4
  from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel
5
  from gradio_client import Client, handle_file
 
6
 
7
  # ==========================================
8
  # 1. LAZY LOAD LOCAL CARTOON MODELS
 
18
  "xinsir/controlnet-scribble-sdxl-1.0",
19
  torch_dtype=dtype
20
  )
 
21
  image_pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
22
  "cagliostrolab/animagine-xl-3.1",
23
  controlnet=controlnet,
 
33
  load_cartoon_models()
34
  image_pipe.to("cuda")
35
 
 
36
  master_prompt = f"{user_prompt}, masterpiece, best quality, highly detailed, professional 2d animation, flat colors, anime style"
37
  neg_prompt = "nsfw, photorealistic, 3d render, ugly, messy lines, bad anatomy, bad hands, missing fingers, lowres, worst quality"
38
 
 
56
  controlnet_conditioning_scale=ctrl_scale
57
  ).images[0]
58
 
 
 
 
59
  img_1_path, img_2_path = "frame1.png", "frame2.png"
60
  img_1.save(img_1_path)
61
  img_2.save(img_2_path)
 
66
  # 3. REMOTE API CALL (Cartoon -> Video)
67
  # ==========================================
68
def run_tooncrafter(img_1_path, img_2_path, prompt):
    """Send two cartoon frames to the hosted ToonCrafter Space and return the
    generated interpolation video.

    Args:
        img_1_path: Filesystem path of the start frame image.
        img_2_path: Filesystem path of the end frame image.
        prompt: Motion/content description forwarded to ToonCrafter.

    Returns:
        Filepath of the generated video (string).

    Raises:
        gr.Error: If connecting to the Space or the prediction call fails.
    """
    print("🎬 Connecting to official ToonCrafter Space...")
    try:
        # Constructing the client performs a network handshake with the Space,
        # so it belongs inside the try block: a connection failure should
        # surface as a friendly gr.Error, not an unhandled traceback.
        client = Client("Doubiiu/tooncrafter")

        print("🔄 Submitting to ToonCrafter API (/get_image)...")
        # Keyword arguments and order match the /get_image endpoint signature.
        result = client.predict(
            image=handle_file(img_1_path),
            prompt=prompt,
            steps=25,  # lowered from the default 50 to shorten queue wait
            cfg_scale=7.5,
            eta=1.0,
            fs=10,  # FPS per original comment — TODO confirm against the Space's API docs
            seed=123,
            image2=handle_file(img_2_path),
            api_name="/get_image"
        )
        print("✅ ToonCrafter Generation Complete!")

        # The endpoint documents a dict return: {video: filepath, subtitles: None}.
        if isinstance(result, dict) and 'video' in result:
            return result['video']

        # Fallback in case the Space returns the raw filepath string instead.
        return result
    except Exception as e:
        raise gr.Error(f"ToonCrafter API Error: {str(e)}")
97
 
 
99
  # 4. MASTER PIPELINE CONTROLLER
100
  # ==========================================
101
def process_full_animation(sketch_1, sketch_2, prompt, ctrl_scale):
    """Run the full sketch-to-video pipeline: local cartoon rendering on
    ZeroGPU, then remote ToonCrafter animation of the two frames."""
    # Stage 1: turn both sketches into cartoon frames locally.
    start_frame, end_frame = generate_cartoons(sketch_1, sketch_2, prompt, ctrl_scale)

    # Stage 2: hand the frames off to the remote ToonCrafter Space.
    video_file = run_tooncrafter(start_frame, end_frame, prompt)

    return start_frame, end_frame, video_file
109
 
110
  # ==========================================
111
  # 5. GRADIO INTERFACE
112
  # ==========================================
113
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
114
+ gr.Markdown("# ✏️ Sketch-to-ToonCrafter Studio")
115
  gr.Markdown("Upload two sketches. We use **Animagine XL 3.1** locally to turn them into professional cartoons, and pass them to **ToonCrafter** to animate the motion!")
116
 
117
  with gr.Row():
 
120
  sketch_2 = gr.Image(type="pil", label="End Sketch (Black & White lines)", image_mode="RGB")
121
  prompt = gr.Textbox(
122
  label="Character & Motion Description",
123
+ placeholder="e.g., A boy in a red shirt jumping"
124
  )
125
  ctrl_scale = gr.Slider(minimum=0.0, maximum=2.0, value=1.0, step=0.05, label="Sketch Adherence Strength")
126
  generate_btn = gr.Button("Create Animation", variant="primary")