Fabrice-TIERCELIN committed on
Commit e49ba69 · verified · 1 Parent(s): 629dce9

Upload 9 files

Files changed (5)
  1. README.md +4 -6
  2. app.py +612 -218
  3. optimization.py +126 -13
  4. optimization_utils.py +52 -17
  5. requirements.txt +12 -5
README.md CHANGED
@@ -1,14 +1,12 @@
1
  ---
2
- title: FLUX.1 Kontext
3
- emoji:
4
- colorFrom: green
5
  colorTo: gray
6
  sdk: gradio
7
  sdk_version: 5.29.1
8
  app_file: app.py
9
- pinned: true
10
- license: mit
11
- short_description: 'Kontext image editing on FLUX[dev] '
12
  ---
13
 
14
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Wan 2 2 First Last Frame
3
+ emoji: 💻
4
+ colorFrom: purple
5
  colorTo: gray
6
  sdk: gradio
7
  sdk_version: 5.29.1
8
  app_file: app.py
9
+ pinned: false
 
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,8 +1,8 @@
1
- # PyTorch 2.8 (temporary hack)
2
  import os
 
3
  os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')
4
 
5
- # Actual demo code
6
  try:
7
  import spaces
8
  except:
@@ -12,268 +12,662 @@ except:
12
  return lambda *dummy_args, **dummy_kwargs: function(*dummy_args, **dummy_kwargs)
13
  return decorator
14
 
15
- import gradio as gr
16
- import numpy as np
17
  import torch
18
- import random
19
- from datetime import datetime
20
-
21
- from PIL import Image
 
 
22
  import tempfile
23
  import shutil
24
- from pathlib import Path
 
25
 
26
- from diffusers import FluxKontextPipeline
27
- from diffusers.utils import load_image
28
 
 
29
  from optimization import optimize_pipeline_
30
 
31
  MAX_SEED = np.iinfo(np.int32).max
32
 
33
- pipe = FluxKontextPipeline.from_pretrained("yuvraj108c/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
34
- optimize_pipeline_(pipe, image=Image.new("RGB", (512, 512)), prompt='prompt')
 
35
 
36
  input_image_debug_value = [None]
 
37
  prompt_debug_value = [None]
38
- number_debug_value = [None]
39
- def save_on_path(img: Image, filename: str, format_: str = None) -> Path:
 
40
  """
41
- Save `img` in a unique temporary folder under the given `filename`
42
- and return its absolute path.
43
  """
44
- # 1) unique temporary folder
45
- tmp_dir = Path(tempfile.mkdtemp(prefix="pil_tmp_"))
46
-
47
- # 2) full path of the future file
48
- file_path = tmp_dir / filename
49
-
50
- # 3) save
51
- img.save(file_path, format=format_ or img.format)
52
-
53
- return file_path
 
54
 
55
- @spaces.GPU(duration=40)
56
- def infer(
57
- input_image,
 
58
  prompt,
59
- seed = 42,
60
- randomize_seed = False,
61
- guidance_scale = 2.5,
62
- steps = 28,
63
- width = -1,
64
- height = -1,
 
 
65
  progress=gr.Progress(track_tqdm=True)
66
  ):
67
- """
68
- Perform image editing using the FLUX.1 Kontext pipeline.
 
69
 
70
- This function takes an input image and a text prompt to generate a modified version
71
- of the image based on the provided instructions. It uses the FLUX.1 Kontext model
72
- for contextual image editing tasks.
 
73
 
74
- Args:
75
- input_image (PIL.Image.Image): The input image to be edited. Will be converted
76
- to RGB format if not already in that format.
77
- prompt (str): Text description of the desired edit to apply to the image.
78
- Examples: "Remove glasses", "Add a hat", "Change background to beach".
79
- seed (int, optional): Random seed for reproducible generation. Defaults to 42.
80
- Must be between 0 and MAX_SEED (2^31 - 1).
81
- randomize_seed (bool, optional): If True, generates a random seed instead of
82
- using the provided seed value. Defaults to False.
83
- guidance_scale (float, optional): Controls how closely the model follows the
84
- prompt. Higher values mean stronger adherence to the prompt but may reduce
85
- image quality. Range: 1.0-10.0. Defaults to 2.5.
86
- steps (int, optional): Controls how many steps to run the diffusion model for.
87
- Range: 1-30. Defaults to 28.
88
- progress (gr.Progress, optional): Gradio progress tracker for monitoring
89
- generation progress. Defaults to gr.Progress(track_tqdm=True).
90
 
91
- Returns:
92
- tuple: A 3-tuple containing:
93
- - PIL.Image.Image: The generated/edited image
94
- - int: The seed value used for generation (useful when randomize_seed=True)
95
- - gr.update: Gradio update object to make the reuse button visible
 
96
 
97
- Example:
98
- >>> edited_image, used_seed, button_update = infer(
99
- ... input_image=my_image,
100
- ... prompt="Add sunglasses",
101
- ... seed=123,
102
- ... randomize_seed=False,
103
- ... guidance_scale=2.5
104
- ... )
105
- """
106
- if randomize_seed:
107
- seed = random.randint(0, MAX_SEED)
108
 
109
- if input_image:
110
- input_image = input_image.convert("RGB")
111
- image = pipe(
112
- image=input_image,
113
- prompt=prompt,
114
- guidance_scale=guidance_scale,
115
- width = input_image.size[0] if width == -1 else width,
116
- height = input_image.size[1] if height == -1 else height,
117
- num_inference_steps=steps,
118
- generator=torch.Generator().manual_seed(seed),
119
- ).images[0]
120
- else:
121
- image = pipe(
122
- prompt=prompt,
123
- guidance_scale=guidance_scale,
124
- num_inference_steps=steps,
125
- generator=torch.Generator().manual_seed(seed),
126
- ).images[0]
127
 
128
- image_filename = datetime.now().strftime("%Y-%m-%d_%H-%M-%S.%f") + '.webp'
129
- path = save_on_path(image, image_filename, format_="WEBP")
130
- return path, gr.update(value=path, visible=True), seed, gr.update(visible=True)
131
-
132
- def infer_example(input_image, prompt):
133
- number=1
134
- if input_image_debug_value[0] is not None or prompt_debug_value[0] is not None or number_debug_value[0] is not None:
135
- input_image=input_image_debug_value[0]
136
- prompt=prompt_debug_value[0]
137
- number=number_debug_value[0]
138
- #input_image_debug_value[0]=prompt_debug_value[0]=prompt_debug_value[0]=None
139
- gallery = []
 
140
  try:
141
- for i in range(number):
142
- print("Generating #" + str(i + 1) + " image...")
143
- seed = random.randint(0, MAX_SEED)
144
- image, download_button, seed, _ = infer(input_image, prompt, seed, True)
145
- gallery.append(image)
146
  except:
147
- print("Error")
148
- return gallery, seed
 
149
 
150
- css="""
151
- #col-container {
152
- margin: 0 auto;
153
- max-width: 960px;
 
154
  }
155
  """
156
 
157
- with gr.Blocks(css=css) as demo:
158
-
159
- with gr.Column(elem_id="col-container"):
160
- gr.Markdown(f"""# FLUX.1 Kontext [dev]
161
- Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro], [[blog]](https://bfl.ai/announcements/flux-1-kontext-dev) [[model]](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev)
162
- """)
163
- with gr.Row():
164
- with gr.Column():
165
- input_image = gr.Image(label="Upload the image for editing", type="pil")
166
  with gr.Row():
167
- prompt = gr.Text(
168
- label="Prompt",
169
- show_label=False,
170
- max_lines=1,
171
- placeholder="Enter your prompt for editing (e.g., 'Remove glasses', 'Add a hat')",
172
- container=False,
173
- )
174
- run_button = gr.Button(value="🚀 Edit", variant = "primary", scale=0)
 
 
175
  with gr.Accordion("Advanced Settings", open=False):
176
-
177
- seed = gr.Slider(
178
- label="Seed",
179
- minimum=0,
180
- maximum=MAX_SEED,
181
- step=1,
182
- value=0,
183
- )
184
-
185
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
186
-
187
- guidance_scale = gr.Slider(
188
- label="Guidance Scale",
189
- minimum=1,
190
- maximum=10,
191
- step=0.1,
192
- value=2.5,
193
- )
194
-
195
- steps = gr.Slider(
196
- label="Steps",
197
- minimum=1,
198
- maximum=30,
199
- value=30,
200
- step=1
201
- )
202
-
203
- width = gr.Slider(
204
- label="Output width",
205
- info="-1 = original width",
206
- minimum=-1,
207
- maximum=1024,
208
- value=-1,
209
- step=1
210
- )
211
-
212
- height = gr.Slider(
213
- label="Output height",
214
- info="-1 = original height",
215
- minimum=-1,
216
- maximum=1024,
217
- value=-1,
218
- step=1
219
- )
220
-
221
- with gr.Column():
222
- result = gr.Image(label="Result", show_label=False, interactive=False)
223
- download_button = gr.DownloadButton(elem_id="download_btn", visible=False)
224
- reuse_button = gr.Button("Reuse this image", visible=False)
 
225
 
226
  with gr.Row(visible=False):
227
- result_gallery = gr.Gallery(label = 'Downloadable results', show_label = True, interactive = False, elem_id = "gallery1")
228
  gr.Examples(
229
- examples=[
230
- ["monster.png", "Make this monster ride a skateboard on the beach"]
231
- ],
232
- inputs=[input_image, prompt],
233
- outputs=[result_gallery, seed],
234
- fn=infer_example,
235
  run_on_click=True,
236
  cache_examples=True,
237
- cache_mode='lazy'
238
  )
239
  prompt_debug=gr.Textbox(label="Prompt Debug")
240
  input_image_debug=gr.Image(type="pil", label="Image Debug")
241
- number_debug=gr.Slider(label="Number Debug", minimum=1, maximum=50, step=1, value=50)
242
-
243
- gr.Examples(
244
- label = "Examples from demo",
245
- examples=[
246
- ["flowers.png", "turn the flowers into sunflowers"],
247
- ["monster.png", "make this monster ride a skateboard on the beach"],
248
- ["cat.png", "make this cat happy"]
249
- ],
250
- inputs=[input_image, prompt],
251
- outputs=[result, download_button, seed],
252
- fn=infer
253
- )
 
254
 
255
- def handle_field_debug_change(input_image_debug_data, prompt_debug_data, number_debug_data):
256
- prompt_debug_value[0] = prompt_debug_data
 
257
  input_image_debug_value[0] = input_image_debug_data
258
- number_debug_value[0] = number_debug_data
 
259
  return []
260
 
261
- inputs_debug=[input_image_debug, prompt_debug, number_debug]
262
 
263
  input_image_debug.upload(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
 
264
  prompt_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
265
- number_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
266
-
267
- gr.on(
268
- triggers=[run_button.click, prompt.submit],
269
- fn = infer,
270
- inputs = [input_image, prompt, seed, randomize_seed, guidance_scale, steps, width, height],
271
- outputs = [result, download_button, seed, reuse_button]
272
- )
273
- reuse_button.click(
274
- fn = lambda image: image,
275
- inputs = [result],
276
- outputs = [input_image]
 
277
  )
278
 
279
- demo.launch(mcp_server=True)
 
 
 
1
  import os
2
+ # PyTorch 2.8 (temporary hack)
3
  os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')
4
 
5
+ # --- 1. Model Download and Setup (Diffusers Backend) ---
6
  try:
7
  import spaces
8
  except:
 
12
  return lambda *dummy_args, **dummy_kwargs: function(*dummy_args, **dummy_kwargs)
13
  return decorator
14
 
 
 
15
  import torch
16
+ from diffusers import FlowMatchEulerDiscreteScheduler
17
+ from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
18
+ from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
19
+ from diffusers.utils.export_utils import export_to_video
20
+ import gradio as gr
21
+ import imageio_ffmpeg
22
  import tempfile
23
  import shutil
24
+ import subprocess
25
+ import time
26
+ from datetime import datetime
27
+ import numpy as np
28
+ from PIL import Image
29
+ import random
30
+ import math
31
+ import traceback
32
+ import gc
33
+ from gradio_client import Client, handle_file # Import for API call
34
+ import zipfile
35
 
36
+ # Import optimization and access compiled artifacts
37
+ import optimization
38
 
39
+ # Import the optimization function from the separate file
40
  from optimization import optimize_pipeline_
41
 
42
+ # --- Constants and Model Loading ---
43
+ MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
44
+
45
+ # --- NEW: Flexible Dimension Constants ---
46
+ MAX_DIMENSION = 832
47
+ MIN_DIMENSION = 480
48
+ DIMENSION_MULTIPLE = 16
49
+ SQUARE_SIZE = 480
50
+
51
  MAX_SEED = np.iinfo(np.int32).max
52
 
53
+ FIXED_FPS = 24
54
+ MIN_FRAMES_MODEL = 8
55
+ MAX_FRAMES_MODEL = 81
56
+
57
+ MIN_DURATION = round(MIN_FRAMES_MODEL/FIXED_FPS, 1)
58
+ MAX_DURATION = round(MAX_FRAMES_MODEL/FIXED_FPS, 1)
59
 
60
  input_image_debug_value = [None]
61
+ end_image_debug_value = [None]
62
  prompt_debug_value = [None]
63
+ total_second_length_debug_value = [None]
64
+ resolution_debug_value = [None]
65
+ factor_debug_value = [None]
66
+ allocation_time_debug_value = [None]
67
+
68
+ default_negative_prompt = "Vibrant colors, overexposure, static, blurred details, subtitles, error, style, artwork, painting, image, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, mutilated, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still image, cluttered background, three legs, many people in the background, walking backwards, overexposure, jumpcut, crossfader, "
69
+
70
+ transformer = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
71
+ subfolder='transformer',
72
+ torch_dtype=torch.bfloat16,
73
+ device_map='cuda',
74
+ )
75
+
76
+ transformer_2 = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
77
+ subfolder='transformer_2',
78
+ torch_dtype=torch.bfloat16,
79
+ device_map='cuda',
80
+ )
81
+
82
+ pipe = WanImageToVideoPipeline.from_pretrained(
83
+ MODEL_ID,
84
+ transformer = transformer,
85
+ transformer_2 = transformer_2,
86
+ torch_dtype=torch.bfloat16,
87
+ )
88
+ pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config, shift=8.0)
89
+ pipe.to('cuda')
90
+
91
+ for i in range(3):
92
+ gc.collect()
93
+ torch.cuda.synchronize()
94
+ torch.cuda.empty_cache()
95
+
96
+ optimize_pipeline_(pipe,
97
+ image=Image.new('RGB', (MAX_DIMENSION, MIN_DIMENSION)),
98
+ prompt='prompt',
99
+ height=MIN_DIMENSION,
100
+ width=MAX_DIMENSION,
101
+ num_frames=MAX_FRAMES_MODEL,
102
+ )
103
+
104
+ def _escape_html(s: str) -> str:
105
+ return (s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;"))
106
+
107
+ def _error_to_html(err: BaseException) -> str:
108
+ tb = traceback.format_exc()
109
+ return (
110
+ "<div style='padding:12px;border:1px solid #ff4d4f;background:#fff1f0;color:#a8071a;border-radius:8px;'>"
111
+ "<b>Generation failed</b><br/>"
112
+ f"<b>{_escape_html(type(err).__name__)}</b>: {_escape_html(str(err))}"
113
+ "<details style='margin-top:8px;'>"
114
+ "<summary>Show traceback</summary>"
115
+ f"<pre style='white-space:pre-wrap;margin-top:8px;'>{_escape_html(tb)}</pre>"
116
+ "</details>"
117
+ "</div>"
118
+ )
119
+
120
+ # 20250508 pftq: for saving prompt to mp4 metadata comments
121
+ def set_mp4_comments_imageio_ffmpeg(input_file, comments):
122
+ try:
123
+ # Get the path to the bundled FFmpeg binary from imageio-ffmpeg
124
+ ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()
125
+
126
+ # Check if input file exists
127
+ if not os.path.exists(input_file):
128
+ #print(f"Error: Input file {input_file} does not exist")
129
+ return False
130
+
131
+ # Create a temporary file path
132
+ temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
133
+
134
+ # FFmpeg command using the bundled binary
135
+ command = [
136
+ ffmpeg_path, # Use imageio-ffmpeg's FFmpeg
137
+ '-i', input_file, # input file
138
+ '-metadata', f'comment={comments}', # set comment metadata
139
+ '-c:v', 'copy', # copy video stream without re-encoding
140
+ '-c:a', 'copy', # copy audio stream without re-encoding
141
+ '-y', # overwrite output file if it exists
142
+ temp_file # temporary output file
143
+ ]
144
+
145
+ # Run the FFmpeg command
146
+ result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
147
+
148
+ if result.returncode == 0:
149
+ # Replace the original file with the modified one
150
+ shutil.move(temp_file, input_file)
151
+ #print(f"Successfully added comments to {input_file}")
152
+ return True
153
+ else:
154
+ # Clean up temp file if FFmpeg fails
155
+ if os.path.exists(temp_file):
156
+ os.remove(temp_file)
157
+ #print(f"Error: FFmpeg failed with message:\n{result.stderr}")
158
+ return False
159
+
160
+ except Exception as e:
161
+ # Clean up temp file in case of other errors
162
+ if 'temp_file' in locals() and os.path.exists(temp_file):
163
+ os.remove(temp_file)
164
+ print(f"Error saving prompt to video metadata, ffmpeg may be required: "+str(e))
165
+ return False
166
+
167
+ # --- 2. Image Processing and Application Logic ---
168
+ def generate_end_frame(start_img, gen_prompt, progress=gr.Progress(track_tqdm=True)):
169
+ """Calls an external Gradio API to generate an image."""
170
+ if start_img is None:
171
+ raise gr.Error("Please provide a Start Frame first.")
172
+
173
+ hf_token = os.getenv("HF_TOKEN")
174
+ if not hf_token:
175
+ raise gr.Error("HF_TOKEN not found in environment variables. Please set it in your Space secrets.")
176
+
177
+ with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmpfile:
178
+ start_img.save(tmpfile.name)
179
+ tmp_path = tmpfile.name
180
+
181
+ progress(0.1, desc="Connecting to image generation API...")
182
+ client = Client("multimodalart/nano-banana-private")
183
+
184
+ progress(0.5, desc=f"Generating with prompt: '{gen_prompt}'...")
185
+ try:
186
+ result = client.predict(
187
+ prompt=gen_prompt,
188
+ images=[
189
+ {"image": handle_file(tmp_path)}
190
+ ],
191
+ manual_token=hf_token,
192
+ api_name="/unified_image_generator"
193
+ )
194
+ finally:
195
+ os.remove(tmp_path)
196
+
197
+ progress(1.0, desc="Done!")
198
+ print(result)
199
+ return result
200
+
201
+ def switch_to_upload_tab():
202
+ """Returns a gr.Tabs update to switch to the first tab."""
203
+ return gr.Tabs(selected="upload_tab")
204
+
205
+
206
+ def process_image_for_video(image: Image.Image, resolution: int) -> Image.Image:
207
  """
208
+ Resizes an image for video generation: images larger than the target resolution are scaled down, images smaller than MIN_DIMENSION**2 are scaled up, and both sides are snapped to multiples of DIMENSION_MULTIPLE.
 
209
  """
210
+ width, height = image.size
211
+
212
+ if resolution < width * height:
213
+ scale = ((width * height) / resolution)**(.5)
214
+ new_width = width / scale
215
+ new_height = height / scale
216
+ final_width = int(math.floor(new_width / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
217
+ final_height = int(math.floor(new_height / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
218
+
219
+ elif width * height < (MIN_DIMENSION**2):
220
+ scale = ((MIN_DIMENSION**2) / (width * height))**(.5)
221
+ new_width = width * scale
222
+ new_height = height * scale
223
+ final_width = int(math.ceil(new_width / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
224
+ final_height = int(math.ceil(new_height / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
225
+
226
+ else:
227
+ final_width = int(round(width / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
228
+ final_height = int(round(height / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
229
+
230
+ return image.resize((final_width, final_height), Image.Resampling.LANCZOS)
231
+
232
+ def resize_and_crop_to_match(target_image, reference_image):
233
+ """Resizes the target image to match the reference image's dimensions."""
234
+ ref_width, ref_height = reference_image.size
235
+ return target_image.resize((ref_width, ref_height), Image.Resampling.LANCZOS)
236
 
237
+ def crop_to_match(target_image, reference_image):
238
+ """Resizes and center-crops the target image to match the reference image's dimensions."""
239
+ ref_width, ref_height = reference_image.size
240
+ target_width, target_height = target_image.size
241
+ scale = max(ref_width / target_width, ref_height / target_height)
242
+ new_width, new_height = int(target_width * scale), int(target_height * scale)
243
+ resized = target_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
244
+ left, top = (new_width - ref_width) // 2, (new_height - ref_height) // 2
245
+ return resized.crop((left, top, left + ref_width, top + ref_height))
246
+
247
+ def init_view():
248
+ return gr.update(interactive = True)
249
+
250
+ def output_video_change(output_video):
251
+ print('Log output: ' + str(output_video))
252
+ return [gr.update(visible = True)] * 2
253
+
254
+ def generate_video(
255
+ start_image_pil,
256
+ end_image_pil,
257
  prompt,
258
+ negative_prompt=default_negative_prompt,
259
+ resolution=500000,
260
+ duration_seconds=2.1,
261
+ steps=8,
262
+ guidance_scale=1,
263
+ guidance_scale_2=1,
264
+ seed=42,
265
+ randomize_seed=True,
266
  progress=gr.Progress(track_tqdm=True)
267
  ):
268
+ start = time.time()
269
+ allocation_time = 120
270
+ factor = 1
271
 
272
+ if input_image_debug_value[0] is not None or end_image_debug_value[0] is not None or prompt_debug_value[0] is not None or total_second_length_debug_value[0] is not None or allocation_time_debug_value[0] is not None or resolution_debug_value[0] is not None or factor_debug_value[0] is not None:
273
+ start_image_pil = input_image_debug_value[0]
274
+ end_image_pil = end_image_debug_value[0]
275
+ prompt = prompt_debug_value[0]
276
+ duration_seconds = total_second_length_debug_value[0]
277
+ resolution = resolution_debug_value[0]
278
+ factor = factor_debug_value[0]
279
+ allocation_time = allocation_time_debug_value[0]
280
+
281
+ if start_image_pil is None or end_image_pil is None:
282
+ raise gr.Error("Please upload both a start and an end image.")
283
+
284
+ # Step 1: Process the start image to get our target dimensions based on the new rules.
285
+ processed_start_image = process_image_for_video(start_image_pil, resolution)
286
 
287
+ # Step 2: Make the end image match the *exact* dimensions of the processed start image.
288
+ processed_end_image = resize_and_crop_to_match(end_image_pil, processed_start_image)
 
289
 
290
+ target_height, target_width = processed_start_image.height, processed_start_image.width
291
+
292
+ # Handle seed and frame count
293
+ current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
294
+ num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
295
+
296
+ progress(0.1, desc="Preprocessing images...")
+
+ progress(0.2, desc=f"Generating {num_frames} frames at {target_width}x{target_height} (seed: {current_seed})...")
299
+ print("Generate a video with the prompt: " + prompt)
300
+ output_frames_list = None
301
+ caught_error = None
302
+ while factor >= 1 and int(allocation_time) > 0:
303
+ try:
304
+ output_frames_list = generate_video_on_gpu(
305
+ start_image_pil,
306
+ end_image_pil,
307
+ prompt,
308
+ negative_prompt,
309
+ int(steps),
310
+ float(guidance_scale),
311
+ float(guidance_scale_2),
312
+ progress,
313
+ allocation_time,
314
+ target_height,
315
+ target_width,
316
+ current_seed,
317
+ (int(((num_frames * factor) - 1) / 4) * 4) + 1,
318
+ processed_start_image,
319
+ processed_end_image
320
+ )
321
+ caught_error = None
+ break
323
+ except BaseException as err:
324
+ print("An exception occurred: " + str(err))
325
+ caught_error = err
326
+ try:
327
+ print('e.message: ' + err.message) # No GPU is currently available for you after 60s
328
+ except Exception as e2:
329
+ print('Failure')
330
+ if not str(err).startswith("No GPU is currently available for you after 60s"):
331
+ factor -= .003
332
+ allocation_time = int(allocation_time) - 1
333
+ except:
334
+ print("An error occurred")
335
+ caught_error = None
336
+ # the exception object is not bound in this bare handler, so always back off
+ factor -= .003
338
+ allocation_time = int(allocation_time) - 1
339
+
340
+ if caught_error is not None:
341
+ return [gr.skip(), gr.skip(), gr.skip(), gr.update(value=_error_to_html(caught_error), visible=True), gr.skip()]
342
+
343
+ input_image_debug_value[0] = end_image_debug_value[0] = prompt_debug_value[0] = total_second_length_debug_value[0] = allocation_time_debug_value[0] = factor_debug_value[0] = None
344
 
345
+ progress(0.9, desc="Encoding and saving video...")
 
346
 
347
+ video_path = 'wan_' + datetime.now().strftime("%Y-%m-%d_%H-%M-%S.%f") + '.mp4'
348
+
349
+ export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
350
+ set_mp4_comments_imageio_ffmpeg(video_path, f"Prompt: {prompt} | Negative Prompt: {negative_prompt}")
351
+ print("Video exported: " + video_path)
 
352
 
353
+ progress(1.0, desc="Done!")
354
+ end = time.time()
355
+ secondes = int(end - start)
356
+ minutes = math.floor(secondes / 60)
357
+ secondes = secondes - (minutes * 60)
358
+ hours = math.floor(minutes / 60)
359
+ minutes = minutes - (hours * 60)
360
+ information = ("Start the process again if you want a different result. " if randomize_seed else "") + \
361
+ "The video been generated in " + \
362
+ ((str(hours) + " h, ") if hours != 0 else "") + \
363
+ ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
364
+ str(secondes) + " sec (including " + str(allocation_time) + " seconds of GPU). " + \
365
+ "The video has " + str(int(num_frames * factor)) + " frames. " + \
366
+ "The video resolution is " + str(target_width) + \
367
+ " pixels large and " + str(target_height) + \
368
+ " pixels high, so a resolution of " + f'{target_width * target_height:,}' + " pixels." + \
369
+ " Your prompt is saved into the metadata of the video."
370
+ return [video_path, gr.update(value = video_path, visible = True, interactive = True), current_seed, gr.update(value = information, visible = True), gr.update(interactive = False)]
371
+
372
+ def get_duration(
373
+ start_image_pil,
374
+ end_image_pil,
375
+ prompt,
376
+ negative_prompt,
377
+ steps,
378
+ guidance_scale,
379
+ guidance_scale_2,
380
+ progress,
381
+ allocation_time,
382
+ target_height,
383
+ target_width,
384
+ current_seed,
385
+ num_frames,
386
+ processed_start_image,
387
+ processed_end_image
388
+ ):
389
+ return allocation_time
390
+
391
+ @torch.no_grad()
392
+ @spaces.GPU(duration=get_duration)
393
+ def generate_video_on_gpu(
394
+ start_image_pil,
395
+ end_image_pil,
396
+ prompt,
397
+ negative_prompt,
398
+ steps,
399
+ guidance_scale,
400
+ guidance_scale_2,
401
+ progress,
402
+ allocation_time,
403
+ target_height,
404
+ target_width,
405
+ current_seed,
406
+ num_frames,
407
+ processed_start_image,
408
+ processed_end_image
409
+ ):
410
+ """
411
+ Generates a video by interpolating between a start and end image, guided by a text prompt,
412
+ using the diffusers Wan2.2 pipeline.
413
+ """
414
+
415
+ output_frames_list = pipe(
416
+ image=processed_start_image,
417
+ last_image=processed_end_image,
418
+ prompt=prompt,
419
+ negative_prompt=negative_prompt,
420
+ height=target_height,
421
+ width=target_width,
422
+ num_frames=num_frames,
423
+ guidance_scale=guidance_scale,
424
+ guidance_scale_2=guidance_scale_2,
425
+ num_inference_steps=steps,
426
+ generator=torch.Generator(device="cuda").manual_seed(current_seed),
427
+ ).frames[0]
428
+
429
+ return output_frames_list
430
+
431
+ def export_compiled_transformers_to_zip() -> str:
432
+ """
433
+ Bundle compiled_transformer_1 and compiled_transformer_2 into a zip file and return the file path.
434
+ """
435
+ ct1 = getattr(optimization, "COMPILED_TRANSFORMER_1", None)
436
+ ct2 = getattr(optimization, "COMPILED_TRANSFORMER_2", None)
437
+
438
+ if ct1 is None or ct2 is None:
439
+ raise gr.Error("Compiled transformers are not available yet (compilation may have failed).")
440
+
441
+ payload_1 = ct1.to_serializable_dict()
442
+ payload_2 = ct2.to_serializable_dict()
443
+
444
+ tmp_zip = tempfile.NamedTemporaryFile(suffix=".zip", delete=False)
445
+ tmp_zip.close()
446
+
447
+ with zipfile.ZipFile(tmp_zip.name, "w", compression=zipfile.ZIP_DEFLATED) as zf:
448
+ # store with torch.save so users can load easily with torch.load()
449
+ buf1 = tempfile.NamedTemporaryFile(suffix=".pt", delete=False)
450
+ buf1.close()
451
+ torch.save(payload_1, buf1.name)
452
+
453
+ buf2 = tempfile.NamedTemporaryFile(suffix=".pt", delete=False)
454
+ buf2.close()
455
+ torch.save(payload_2, buf2.name)
456
+
457
+ zf.write(buf1.name, arcname="compiled_transformer_1.pt")
458
+ zf.write(buf2.name, arcname="compiled_transformer_2.pt")
459
+
460
+ # cleanup intermediate .pt
461
  try:
462
+ os.remove(buf1.name)
463
+ os.remove(buf2.name)
 
 
 
464
  except:
465
+ pass
466
+
467
+ return tmp_zip.name
468
+
469
+
470
+ # --- 3. Gradio User Interface ---
471
+
472
 
473
+
474
+ js = """
475
+ function createGradioAnimation() {
476
+ window.addEventListener("beforeunload", function(e) {
477
+ if (document.getElementById('dummy_button_id') && !document.getElementById('dummy_button_id').disabled) {
478
+ var confirmationMessage = 'A process is still running. '
479
+ + 'If you leave before saving, your changes will be lost.';
480
+
481
+ (e || window.event).returnValue = confirmationMessage;
482
+ }
483
+ return confirmationMessage;
484
+ });
485
+ return 'Animation created';
486
  }
487
  """
488
 
489
+ # Gradio interface
490
+ with gr.Blocks(js=js) as app:
491
+ gr.Markdown("# Wan 2.2 First/Last Frame Video Fast")
492
+ gr.Markdown("Based on the [Wan 2.2 First/Last Frame workflow](https://www.reddit.com/r/StableDiffusion/comments/1me4306/psa_wan_22_does_first_frame_last_frame_out_of_the/), applied to 🧨 Diffusers + [lightx2v/Wan2.2-Lightning](https://huggingface.co/lightx2v/Wan2.2-Lightning) 8-step LoRA")
493
+
494
+ with gr.Row(elem_id="general_items"):
495
+ with gr.Column():
496
+ with gr.Group(elem_id="group_all"):
 
497
  with gr.Row():
498
+ start_image = gr.Image(type="pil", label="Start Frame", sources=["upload", "clipboard"])
499
+ # Capture the Tabs component in a variable and assign IDs to tabs
500
+ with gr.Tabs(elem_id="group_tabs") as tabs:
501
+ with gr.TabItem("Upload", id="upload_tab"):
502
+ end_image = gr.Image(type="pil", label="End Frame", sources=["upload", "clipboard"])
503
+ with gr.TabItem("Generate", id="generate_tab"):
504
+ generate_5seconds = gr.Button("Generate scene 5 seconds in the future", elem_id="fivesec")
505
+ gr.Markdown("Generate a custom end-frame with an edit model like [Nano Banana](https://huggingface.co/spaces/multimodalart/nano-banana) or [Qwen Image Edit](https://huggingface.co/spaces/multimodalart/Qwen-Image-Edit-Fast)", elem_id="or_item")
506
+ prompt = gr.Textbox(label="Prompt", info="Describe the transition between the two images", placeholder="The creature starts to move")
507
+
508
  with gr.Accordion("Advanced Settings", open=False):
509
+ duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=2.1, label="Video Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
510
+ negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
511
+ resolution = gr.Dropdown([
512
+ ["400,000 px (working)", 400000],
513
+ ["465,920 px (working)", 465920],
514
+ ["495,616 px (working)", 495616],
515
+ ["500,000 px (working)", 500000],
516
+ ["600,000 px (working)", 600000],
517
+ ["700,000 px (working)", 700000],
518
+ ["800,000 px (working)", 800000],
519
+ ["900,000 px (working)", 900000],
520
+ ["1,000,000 px (working)", 1000000],
521
+ ["1,100,000 px (untested)", 1100000],
522
+ ["1,200,000 px (untested)", 1200000],
523
+ ["1,300,000 px (untested)", 1300000],
524
+ ["1,400,000 px (untested)", 1400000],
525
+ ["1,500,000 px (untested)", 1500000]
526
+ ], value=465920, label="Resolution (width x height)", info="Upper bound; the output is smaller if the input image is smaller")
527
+ steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=8, label="Inference Steps")
528
+ guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - high noise")
529
+ guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - low noise")
530
+ with gr.Row():
531
+ seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
532
+ randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)
533
+
534
+ generate_button = gr.Button("🚀 Generate Video", variant="primary")
535
+ dummy_button = gr.Button(elem_id = "dummy_button_id", visible = False, interactive = False)
536
+
537
+ with gr.Column():
538
+ output_video = gr.Video(label="Generated Video", autoplay = True, loop = True)
539
+ download_button = gr.DownloadButton(elem_id="download_btn", interactive = True)
540
+ video_information = gr.HTML(value = "")
541
+
542
+ with gr.Accordion("🔧 Compilation artifacts (advanced)", open=False):
543
+ gr.Markdown(
544
+ "Télécharge les artefacts compilés AOTInductor générés au démarrage (transformer + transformer_2)."
545
+ )
546
+ export_btn = gr.Button("📦 Prepare the compiled transformers archive")
547
+ compiled_download = gr.DownloadButton(label="⬇️ Download compiled_transformers.zip", interactive=False)
548
+
549
+ def _build_and_enable_download():
550
+ path = export_compiled_transformers_to_zip()
551
+ return gr.update(value=path, interactive=True)
552
+
553
+ export_btn.click(fn=_build_and_enable_download, inputs=None, outputs=compiled_download)
554
+
555
+ # Main video generation button
556
+ ui_inputs = [
557
+ start_image,
558
+ end_image,
559
+ prompt,
560
+ negative_prompt_input,
561
+ resolution,
562
+ duration_seconds_input,
563
+ steps_slider,
564
+ guidance_scale_input,
565
+ guidance_scale_2_input,
566
+ seed_input,
567
+ randomize_seed_checkbox
568
+ ]
569
+ ui_outputs = [output_video, download_button, seed_input, video_information, dummy_button]
570
+
571
+ generate_button.click(fn = init_view, inputs = [], outputs = [dummy_button], queue = False, show_progress = False).success(
572
+ fn = generate_video,
573
+ inputs = ui_inputs,
574
+ outputs = ui_outputs
575
+ )
576
+
577
+ generate_5seconds.click(
578
+ fn=switch_to_upload_tab,
579
+ inputs=None,
580
+ outputs=[tabs]
581
+ ).then(
582
+ fn=lambda img: generate_end_frame(img, "this image is a still frame from a movie. generate a new frame with what happens on this scene 5 seconds in the future"),
583
+ inputs=[start_image],
584
+ outputs=[end_image]
585
+ ).success(
586
+ fn=generate_video,
587
+ inputs=ui_inputs,
588
+ outputs=ui_outputs
589
+ )
590
+
591
+ output_video.change(
592
+ fn=output_video_change,
593
+ inputs=[output_video],
594
+ outputs=[download_button, video_information],
595
+ js="document.getElementById('download_btn').click()"
596
+ )
597
 
598
  with gr.Row(visible=False):
 
599
  gr.Examples(
600
+ examples=[["Schoolboy_without_backpack.webp", "Schoolboy_with_backpack.webp", "The schoolboy puts on his schoolbag."]],
601
+ inputs=[start_image, end_image, prompt],
602
+ outputs=ui_outputs,
603
+ fn=generate_video,
 
 
604
  run_on_click=True,
605
  cache_examples=True,
606
+ cache_mode='lazy',
607
  )
608
  prompt_debug=gr.Textbox(label="Prompt Debug")
609
  input_image_debug=gr.Image(type="pil", label="Image Debug")
610
+ end_image_debug=gr.Image(type="pil", label="End Image Debug")
611
+ total_second_length_debug=gr.Slider(label="Duration Debug", minimum=1, maximum=120, value=5, step=0.1)
612
+ resolution_debug = gr.Dropdown([
613
+ ["400,000 px", 400000],
614
+ ["465,920 px", 465920],
615
+ ["495,616 px", 495616],
616
+ ["500,000 px", 500000],
617
+ ["600,000 px", 600000],
618
+ ["700,000 px", 700000],
619
+ ["800,000 px", 800000],
620
+ ["900,000 px", 900000],
621
+ ["1,000,000 px", 1000000],
622
+ ["1,100,000 px", 1100000],
623
+ ["1,200,000 px", 1200000],
624
+ ["1,300,000 px", 1300000],
625
+ ["1,400,000 px", 1400000],
626
+ ["1,500,000 px", 1500000]
627
+ ], value=500000, label="Resolution Debug")
628
+ factor_debug=gr.Slider(label="Factor Debug", minimum=1, maximum=100, value=3.2, step=0.1)
629
+ allocation_time_debug=gr.Slider(label="Allocation Debug", minimum=1, maximum=60 * 20, value=720, step=1)
630
 
631
+ def handle_field_debug_change(
632
+ input_image_debug_data,
633
+ end_image_debug_data,
634
+ prompt_debug_data,
635
+ total_second_length_debug_data,
636
+ resolution_debug_data,
637
+ factor_debug_data,
638
+ allocation_time_debug_data
639
+ ):
640
  input_image_debug_value[0] = input_image_debug_data
641
+ end_image_debug_value[0] = end_image_debug_data
642
+ prompt_debug_value[0] = prompt_debug_data
643
+ total_second_length_debug_value[0] = total_second_length_debug_data
644
+ resolution_debug_value[0] = resolution_debug_data
645
+ factor_debug_value[0] = factor_debug_data
646
+ allocation_time_debug_value[0] = allocation_time_debug_data
647
  return []
648
 
649
+ inputs_debug=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug, resolution_debug, factor_debug, allocation_time_debug]
650
 
651
  input_image_debug.upload(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
652
+ end_image_debug.upload(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
653
  prompt_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
654
+ total_second_length_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
655
+ resolution_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
656
+ factor_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
657
+ allocation_time_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
658
+
659
+ gr.Examples(
660
+ label = "Examples from demo",
661
+ examples = [
662
+ ["poli_tower.png", "tower_takes_off.png", "The man turns around."],
663
+ ["ugly_sonic.jpeg", "squatting_sonic.png", "पात्रं क्षेपणास्त्रं चकमाति।"],
664
+ ["Schoolboy_without_backpack.webp", "Schoolboy_with_backpack.webp", "The schoolboy puts on his schoolbag."],
665
+ ],
666
+ inputs = [start_image, end_image, prompt],
667
+ outputs = ui_outputs,
668
+ fn = generate_video,
669
+ cache_examples = False,
670
  )
671
 
672
+ if __name__ == "__main__":
673
+ app.launch(mcp_server=True, share=True)
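
For reference, the core generation path that the new app.py wires into Gradio can be reduced to the following minimal sketch (no ZeroGPU retry loop, no debug fields). The model ID, scheduler shift, frame/FPS constants and the pipe(...) arguments are taken from the diff above; first.png and last.png are placeholder file names, and without the Lightning LoRA fused in optimization.py the 8-step setting will not give comparable quality:

    import torch
    from PIL import Image
    from diffusers import FlowMatchEulerDiscreteScheduler
    from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
    from diffusers.utils.export_utils import export_to_video

    pipe = WanImageToVideoPipeline.from_pretrained(
        "Wan-AI/Wan2.2-I2V-A14B-Diffusers", torch_dtype=torch.bfloat16
    ).to("cuda")
    pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config, shift=8.0)

    first = Image.open("first.png").convert("RGB")  # placeholder start frame
    last = Image.open("last.png").convert("RGB")    # placeholder end frame; the app resizes it to match

    frames = pipe(
        image=first,
        last_image=last,                            # first/last-frame conditioning
        prompt="The creature starts to move",
        negative_prompt="",
        height=first.height,                        # the app snaps both sides to multiples of 16
        width=first.width,
        num_frames=81,                              # MAX_FRAMES_MODEL at FIXED_FPS=24, about 3.4 s
        guidance_scale=1.0,                         # high-noise expert
        guidance_scale_2=1.0,                       # low-noise expert
        num_inference_steps=8,
        generator=torch.Generator(device="cuda").manual_seed(42),
    ).frames[0]

    export_to_video(frames, "wan_first_last.mp4", fps=24)
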
optimization.py CHANGED
@@ -1,26 +1,42 @@
1
- """
2
- """
3
-
4
  from typing import Any
5
  from typing import Callable
6
  from typing import ParamSpec
7
 
 
8
  import spaces
9
  import torch
10
  from torch.utils._pytree import tree_map_only
 
11
 
12
  from optimization_utils import capture_component_call
13
  from optimization_utils import aoti_compile
 
14
 
15
 
16
  P = ParamSpec('P')
17
 
18
 
19
- TRANSFORMER_HIDDEN_DIM = torch.export.Dim('hidden', min=4096, max=8212)
 
 
20
 
21
  TRANSFORMER_DYNAMIC_SHAPES = {
22
- 'hidden_states': {1: TRANSFORMER_HIDDEN_DIM},
23
- 'img_ids': {0: TRANSFORMER_HIDDEN_DIM},
 
 
 
24
  }
25
 
26
  INDUCTOR_CONFIGS = {
@@ -33,28 +49,125 @@ INDUCTOR_CONFIGS = {
33
  }
34
 
35
 
 
 
36
  def optimize_pipeline_(pipeline: Callable[P, Any], *args: P.args, **kwargs: P.kwargs):
 
37
 
38
  @spaces.GPU(duration=1500)
39
  def compile_transformer():
 
 
40
 
41
- with capture_component_call(pipeline, 'transformer') as call:
42
  pipeline(*args, **kwargs)
43
 
44
  dynamic_shapes = tree_map_only((torch.Tensor, bool), lambda t: None, call.kwargs)
45
  dynamic_shapes |= TRANSFORMER_DYNAMIC_SHAPES
46
 
47
- pipeline.transformer.fuse_qkv_projections()
 
48
 
49
- exported = torch.export.export(
50
  mod=pipeline.transformer,
51
  args=call.args,
52
  kwargs=call.kwargs,
53
  dynamic_shapes=dynamic_shapes,
54
  )
 
 
 
55
 
56
- return aoti_compile(exported, INDUCTOR_CONFIGS)
 
57
 
58
- transformer_config = pipeline.transformer.config
59
- pipeline.transformer = compile_transformer()
60
- pipeline.transformer.config = transformer_config # pyright: ignore[reportAttributeAccessIssue]
 
 
 
 
1
  from typing import Any
2
  from typing import Callable
3
  from typing import ParamSpec
4
 
5
+ import os
6
  import spaces
7
  import torch
8
  from torch.utils._pytree import tree_map_only
9
+ from torchao.quantization import quantize_
10
+ from torchao.quantization import Float8DynamicActivationFloat8WeightConfig
11
+ from torchao.quantization import Int8WeightOnlyConfig
12
+ from huggingface_hub import hf_hub_download
13
+
14
+ from io import BytesIO
15
 
16
  from optimization_utils import capture_component_call
17
  from optimization_utils import aoti_compile
18
+ from optimization_utils import drain_module_parameters
19
+
20
+ # NEW: import classes to rebuild compiled objects
21
+ from optimization_utils import ZeroGPUCompiledModel, ZeroGPUWeights
22
 
23
 
24
  P = ParamSpec('P')
25
 
26
+ # Expose compiled models so app.py can offer them for download
27
+ COMPILED_TRANSFORMER_1 = None
28
+ COMPILED_TRANSFORMER_2 = None
29
 
30
+ LATENT_FRAMES_DIM = torch.export.Dim('num_latent_frames', min=8, max=81)
31
+ LATENT_PATCHED_HEIGHT_DIM = torch.export.Dim('latent_patched_height', min=30, max=52)
32
+ LATENT_PATCHED_WIDTH_DIM = torch.export.Dim('latent_patched_width', min=30, max=52)
33
 
34
  TRANSFORMER_DYNAMIC_SHAPES = {
35
+ 'hidden_states': {
36
+ 2: LATENT_FRAMES_DIM,
37
+ 3: 2 * LATENT_PATCHED_HEIGHT_DIM,
38
+ 4: 2 * LATENT_PATCHED_WIDTH_DIM,
39
+ },
40
  }
41
 
42
  INDUCTOR_CONFIGS = {
 
49
  }
50
 
51
 
52
+ def _deserialize_zerogpu_aoti(payload: dict[str, Any]) -> ZeroGPUCompiledModel:
53
+ """
54
+ Rebuild a ZeroGPUCompiledModel from a stable serialized dict produced by
55
+ ZeroGPUCompiledModel.to_serializable_dict().
56
+ """
57
+ if not isinstance(payload, dict):
58
+ raise ValueError(f"Expected dict payload, got: {type(payload)}")
59
+
60
+ fmt = payload.get("format")
61
+ if fmt != "zerogpu_aoti_v1":
62
+ raise ValueError(f"Unsupported payload format: {fmt!r}")
63
+
64
+ archive_bytes = payload.get("archive_bytes")
65
+ constants_map = payload.get("constants_map")
66
+
67
+ if not isinstance(archive_bytes, (bytes, bytearray)):
68
+ raise ValueError("payload['archive_bytes'] must be bytes")
69
+ if not isinstance(constants_map, dict):
70
+ raise ValueError("payload['constants_map'] must be a dict of tensors")
71
+
72
+ # Recreate in-memory archive file (what aoti_load_package expects)
73
+ archive_file = BytesIO(archive_bytes)
74
+
75
+ # Ensure constants are CPU tensors (ZeroGPUWeights will pin/copy for runtime)
76
+ constants_map = {k: v.detach().to("cpu") for k, v in constants_map.items()}
77
+
78
+ weights = ZeroGPUWeights(constants_map, to_cuda=False)
79
+ return ZeroGPUCompiledModel(archive_file, weights)
80
+
81
+
82
+ def load_compiled_transformers_from_hub(
83
+ repo_id: str,
84
+ filename_1: str = "compiled_transformer_1.pt",
85
+ filename_2: str = "compiled_transformer_2.pt",
86
+ ):
87
+ """
88
+ Load the precompiled artifacts from the Hub.
89
+
90
+ IMPORTANT: the .pt files must contain the serialized dict produced by
91
+ ZeroGPUCompiledModel.to_serializable_dict() (format "zerogpu_aoti_v1").
92
+ """
93
+ path_1 = hf_hub_download(repo_id=repo_id, filename=filename_1)
94
+ path_2 = hf_hub_download(repo_id=repo_id, filename=filename_2)
95
+
96
+ payload_1 = torch.load(path_1, map_location="cpu", weights_only=False)
97
+ payload_2 = torch.load(path_2, map_location="cpu", weights_only=False)
98
+
99
+ compiled_1 = _deserialize_zerogpu_aoti(payload_1)
100
+ compiled_2 = _deserialize_zerogpu_aoti(payload_2)
101
+
102
+ return compiled_1, compiled_2
103
+
104
+
105
  def optimize_pipeline_(pipeline: Callable[P, Any], *args: P.args, **kwargs: P.kwargs):
106
+ global COMPILED_TRANSFORMER_1, COMPILED_TRANSFORMER_2
107
 
108
  @spaces.GPU(duration=1500)
109
  def compile_transformer():
110
+ pipeline.load_lora_weights(
111
+ "Kijai/WanVideo_comfy",
112
+ weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
113
+ adapter_name="lightx2v",
114
+ )
115
+ kwargs_lora = {"load_into_transformer_2": True}
116
+ pipeline.load_lora_weights(
117
+ "Kijai/WanVideo_comfy",
118
+ weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
119
+ adapter_name="lightx2v_2",
120
+ **kwargs_lora,
121
+ )
122
+ pipeline.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1.0, 1.0])
123
+ pipeline.fuse_lora(adapter_names=["lightx2v"], lora_scale=3.0, components=["transformer"])
124
+ pipeline.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1.0, components=["transformer_2"])
125
+ pipeline.unload_lora_weights()
126
 
127
+ with capture_component_call(pipeline, "transformer") as call:
128
  pipeline(*args, **kwargs)
129
 
130
  dynamic_shapes = tree_map_only((torch.Tensor, bool), lambda t: None, call.kwargs)
131
  dynamic_shapes |= TRANSFORMER_DYNAMIC_SHAPES
132
 
133
+ quantize_(pipeline.transformer, Float8DynamicActivationFloat8WeightConfig())
134
+ quantize_(pipeline.transformer_2, Float8DynamicActivationFloat8WeightConfig())
135
 
136
+ exported_1 = torch.export.export(
137
  mod=pipeline.transformer,
138
  args=call.args,
139
  kwargs=call.kwargs,
140
  dynamic_shapes=dynamic_shapes,
141
  )
142
+ exported_2 = torch.export.export(
143
+ mod=pipeline.transformer_2,
144
+ args=call.args,
145
+ kwargs=call.kwargs,
146
+ dynamic_shapes=dynamic_shapes,
147
+ )
148
+
149
+ compiled_1 = aoti_compile(exported_1, INDUCTOR_CONFIGS)
150
+ compiled_2 = aoti_compile(exported_2, INDUCTOR_CONFIGS)
151
+ return compiled_1, compiled_2
152
+
153
+ quantize_(pipeline.text_encoder, Int8WeightOnlyConfig())
154
+
155
+ use_precompiled = False
156
+ precompiled_repo = "Fabrice-TIERCELIN/Wan_2.2_compiled"
157
+
158
+ if use_precompiled:
159
+ compiled_transformer_1, compiled_transformer_2 = load_compiled_transformers_from_hub(
160
+ repo_id=precompiled_repo
161
+ )
162
+ else:
163
+ compiled_transformer_1, compiled_transformer_2 = compile_transformer()
164
+
165
+ # expose for downloads
166
+ COMPILED_TRANSFORMER_1 = compiled_transformer_1
167
+ COMPILED_TRANSFORMER_2 = compiled_transformer_2
168
 
169
+ pipeline.transformer.forward = compiled_transformer_1
170
+ drain_module_parameters(pipeline.transformer)
171
 
172
+ pipeline.transformer_2.forward = compiled_transformer_2
173
+ drain_module_parameters(pipeline.transformer_2)
 
optimization_utils.py CHANGED
@@ -10,7 +10,6 @@ from unittest.mock import patch
10
  import torch
11
  from torch._inductor.package.package import package_aoti
12
  from torch.export.pt2_archive._package import AOTICompiledModel
13
- from torch.export.pt2_archive._package_weights import TensorProperties
14
  from torch.export.pt2_archive._package_weights import Weights
15
 
16
 
@@ -21,31 +20,57 @@ INDUCTOR_CONFIGS_OVERRIDES = {
21
  }
22
 
23
 
 
 
24
  class ZeroGPUCompiledModel:
25
- def __init__(self, archive_file: torch.types.FileLike, weights: Weights, cuda: bool = False):
26
  self.archive_file = archive_file
27
  self.weights = weights
28
- if cuda:
29
- self.weights_to_cuda_()
30
  self.compiled_model: ContextVar[AOTICompiledModel | None] = ContextVar('compiled_model', default=None)
31
- def weights_to_cuda_(self):
32
- for name in self.weights:
33
- tensor, properties = self.weights.get_weight(name)
34
- self.weights[name] = (tensor.to('cuda'), properties)
35
  def __call__(self, *args, **kwargs):
36
  if (compiled_model := self.compiled_model.get()) is None:
37
- constants_map = {name: value[0] for name, value in self.weights.items()}
38
  compiled_model = cast(AOTICompiledModel, torch._inductor.aoti_load_package(self.archive_file))
39
- compiled_model.load_constants(constants_map, check_full_update=True, user_managed=True)
40
  self.compiled_model.set(compiled_model)
41
  return compiled_model(*args, **kwargs)
42
  def __reduce__(self):
43
- weight_dict: dict[str, tuple[torch.Tensor, TensorProperties]] = {}
44
- for name in self.weights:
45
- tensor, properties = self.weights.get_weight(name)
46
- tensor_ = torch.empty_like(tensor, device='cpu').pin_memory()
47
- weight_dict[name] = (tensor_.copy_(tensor).detach().share_memory_(), properties)
48
- return ZeroGPUCompiledModel, (self.archive_file, Weights(weight_dict), True)
 
 
49
 
50
 
51
  def aoti_compile(
@@ -61,7 +86,8 @@ def aoti_compile(
61
  files: list[str | Weights] = [file for file in artifacts if isinstance(file, str)]
62
  package_aoti(archive_file, files)
63
  weights, = (artifact for artifact in artifacts if isinstance(artifact, Weights))
64
- return ZeroGPUCompiledModel(archive_file, weights)
 
65
 
66
 
67
  @contextlib.contextmanager
@@ -94,3 +120,12 @@ def capture_component_call(
94
  except CapturedCallException as e:
95
  captured_call.args = e.args
96
  captured_call.kwargs = e.kwargs
 
 
10
  import torch
11
  from torch._inductor.package.package import package_aoti
12
  from torch.export.pt2_archive._package import AOTICompiledModel
 
13
  from torch.export.pt2_archive._package_weights import Weights
14
 
15
 
 
20
  }
21
 
22
 
23
+ class ZeroGPUWeights:
24
+ def __init__(self, constants_map: dict[str, torch.Tensor], to_cuda: bool = False):
25
+ if to_cuda:
26
+ self.constants_map = {name: tensor.to('cuda') for name, tensor in constants_map.items()}
27
+ else:
28
+ self.constants_map = constants_map
29
+ def __reduce__(self):
30
+ constants_map: dict[str, torch.Tensor] = {}
31
+ for name, tensor in self.constants_map.items():
32
+ tensor_ = torch.empty_like(tensor, device='cpu').pin_memory()
33
+ constants_map[name] = tensor_.copy_(tensor).detach().share_memory_()
34
+ return ZeroGPUWeights, (constants_map, True)
35
+
36
+
37
  class ZeroGPUCompiledModel:
38
+ def __init__(self, archive_file: torch.types.FileLike, weights: ZeroGPUWeights):
39
  self.archive_file = archive_file
40
  self.weights = weights
 
 
41
  self.compiled_model: ContextVar[AOTICompiledModel | None] = ContextVar('compiled_model', default=None)
 
 
 
 
42
  def __call__(self, *args, **kwargs):
43
  if (compiled_model := self.compiled_model.get()) is None:
 
44
  compiled_model = cast(AOTICompiledModel, torch._inductor.aoti_load_package(self.archive_file))
45
+ compiled_model.load_constants(self.weights.constants_map, check_full_update=True, user_managed=True)
46
  self.compiled_model.set(compiled_model)
47
  return compiled_model(*args, **kwargs)
48
  def __reduce__(self):
49
+ return ZeroGPUCompiledModel, (self.archive_file, self.weights)
50
+
51
+ def to_serializable_dict(self) -> dict[str, Any]:
52
+ """
53
+ Return a stable representation that can be stored to disk and later re-loaded
54
+ with torch.load, without depending on Gradio runtime state.
55
+ """
56
+ # BytesIO is file-like; extract raw bytes
57
+ if hasattr(self.archive_file, "getvalue"):
58
+ archive_bytes = self.archive_file.getvalue()
59
+ else:
60
+ # fallback best-effort
61
+ pos = self.archive_file.tell()
62
+ self.archive_file.seek(0)
63
+ archive_bytes = self.archive_file.read()
64
+ self.archive_file.seek(pos)
65
+
66
+ # store constants on CPU in a safe format
67
+ constants_cpu = {k: v.detach().to("cpu") for k, v in self.weights.constants_map.items()}
68
+
69
+ return {
70
+ "format": "zerogpu_aoti_v1",
71
+ "archive_bytes": archive_bytes,
72
+ "constants_map": constants_cpu,
73
+ }
74
 
75
 
76
  def aoti_compile(
 
86
  files: list[str | Weights] = [file for file in artifacts if isinstance(file, str)]
87
  package_aoti(archive_file, files)
88
  weights, = (artifact for artifact in artifacts if isinstance(artifact, Weights))
89
+ zerogpu_weights = ZeroGPUWeights({name: weights.get_weight(name)[0] for name in weights})
90
+ return ZeroGPUCompiledModel(archive_file, zerogpu_weights)
91
 
92
 
93
  @contextlib.contextmanager
 
120
  except CapturedCallException as e:
121
  captured_call.args = e.args
122
  captured_call.kwargs = e.kwargs
123
+
124
+
125
+ def drain_module_parameters(module: torch.nn.Module):
126
+ state_dict_meta = {name: {'device': tensor.device, 'dtype': tensor.dtype} for name, tensor in module.state_dict().items()}
127
+ state_dict = {name: torch.nn.Parameter(torch.empty_like(tensor, device='cpu')) for name, tensor in module.state_dict().items()}
128
+ module.load_state_dict(state_dict, assign=True)
129
+ for name, param in state_dict.items():
130
+ meta = state_dict_meta[name]
131
+ param.data = torch.Tensor([]).to(**meta)
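
As a usage note on the compilation artifacts: the zip produced by export_compiled_transformers_to_zip() in app.py contains two torch.save payloads in the "zerogpu_aoti_v1" format defined by to_serializable_dict() above. A minimal reload sketch, assuming an already constructed pipe and a locally extracted compiled_transformer_1.pt (both placeholders here), mirrors what optimize_pipeline_() does at the end:

    import torch
    from optimization import _deserialize_zerogpu_aoti
    from optimization_utils import drain_module_parameters

    # rebuild the compiled callable from the serialized payload
    payload = torch.load("compiled_transformer_1.pt", map_location="cpu", weights_only=False)
    compiled_1 = _deserialize_zerogpu_aoti(payload)  # returns a ZeroGPUCompiledModel

    # plug it in and drop the module's own parameters: the AOTInductor archive
    # carries its constants in the ZeroGPUWeights constants_map
    pipe.transformer.forward = compiled_1
    drain_module_parameters(pipe.transformer)
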
requirements.txt CHANGED
@@ -1,5 +1,12 @@
1
- transformers
2
- git+https://github.com/huggingface/diffusers.git
3
- accelerate
4
- safetensors
5
- sentencepiece
 
 
1
+ git+https://github.com/YassineT-cdc/diffusers.git@wan22-loras_optimized_contigous
2
+
3
+ transformers==4.57.3
4
+ accelerate==1.12.0
5
+ safetensors==0.7.0
6
+ sentencepiece==0.2.1
7
+ peft==0.18.0
8
+ ftfy==6.3.1
9
+ imageio==2.37.2
10
+ imageio-ffmpeg==0.6.0
11
+
12
+ torchao==0.14.1