rahul7star committed
Commit d7fc5bc · verified · 1 Parent(s): d9b2184

Update app_exp.py

Files changed (1):
  1. app_exp.py +56 -45
app_exp.py CHANGED
@@ -13,7 +13,7 @@ from PIL import Image
 REPO_PATH = "LongCat-Video"
 CHECKPOINT_DIR = os.path.join(REPO_PATH, "weights", "LongCat-Video")
 
-# Clone the repository if it doesn't exist
+# Clone repo if missing
 if not os.path.exists(REPO_PATH):
     print(f"Cloning LongCat-Video repository to '{REPO_PATH}'...")
     subprocess.run(
@@ -23,6 +23,7 @@ if not os.path.exists(REPO_PATH):
 
 sys.path.insert(0, os.path.abspath(REPO_PATH))
 
+# Imports from LongCat repo
 from huggingface_hub import snapshot_download
 from longcat_video.pipeline_longcat_video import LongCatVideoPipeline
 from longcat_video.modules.scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler
@@ -33,7 +34,7 @@ from transformers import AutoTokenizer, UMT5EncoderModel
 from diffusers.utils import export_to_video
 from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig
 
-# Download weights if needed
+# Download model weights if missing
 if not os.path.exists(CHECKPOINT_DIR):
     snapshot_download(
         repo_id="meituan-longcat/LongCat-Video",
@@ -49,11 +50,13 @@ torch_dtype = torch.bfloat16 if device == "cuda" else torch.float32
 print("--- Initializing Models ---")
 try:
     cp_split_hw = context_parallel_util.get_optimal_split(1)
+
     tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT_DIR, subfolder="tokenizer", torch_dtype=torch_dtype)
     text_encoder = UMT5EncoderModel.from_pretrained(CHECKPOINT_DIR, subfolder="text_encoder", torch_dtype=torch_dtype)
     vae = AutoencoderKLWan.from_pretrained(CHECKPOINT_DIR, subfolder="vae", torch_dtype=torch_dtype)
     scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(CHECKPOINT_DIR, subfolder="scheduler", torch_dtype=torch_dtype)
 
+    # ✅ 4-bit quantization enabled
     bnb_4bit_config = DiffusersBitsAndBytesConfig(
         load_in_4bit=True,
         bnb_4bit_quant_type="nf4",
@@ -68,7 +71,7 @@ try:
         subfolder="dit",
         cp_split_hw=cp_split_hw,
         torch_dtype=torch_dtype,
-        quantization_config=bnb_4bit_config
+        quantization_config=bnb_4bit_config  # ✅ added
     )
 
     pipe = LongCatVideoPipeline(
@@ -77,24 +80,27 @@ try:
         vae=vae,
         scheduler=scheduler,
         dit=dit,
-    )
-    pipe.to(device)
+    ).to(device)
 
     pipe.dit.load_lora(os.path.join(CHECKPOINT_DIR, 'lora/cfg_step_lora.safetensors'), 'cfg_step_lora')
     pipe.dit.load_lora(os.path.join(CHECKPOINT_DIR, 'lora/refinement_lora.safetensors'), 'refinement_lora')
 
+    print("--- Models loaded successfully ---")
 except Exception as e:
-    print("Failed to load models:", e)
+    print("Model load error:", e)
     pipe = None
 
+
+# -------------------- GPU Cleanup --------------------
 def torch_gc():
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
         torch.cuda.ipc_collect()
 
-def check_duration(*args, duration_t2v=2, **kwargs):
+# -------------------- Video Generation --------------------
+def check_duration(*_args, duration_t2v=2, **_kwargs):
     fps = 30
-    return duration_t2v * fps  # total frames
+    return duration_t2v * fps
 
 @spaces.GPU(duration=check_duration)
 def generate_video(
@@ -112,27 +118,25 @@ def generate_video(
     if pipe is None:
         raise gr.Error("Models failed to load.")
 
-    fps = 30
-    num_frames = duration_t2v * fps
+    generator = torch.Generator(device=device).manual_seed(int(seed))
+    num_frames = int(duration_t2v * 30)  # ✅ duration-based frame count
     print(prompt)
 
-    generator = torch.Generator(device=device).manual_seed(int(seed))
     is_distill = use_distill or use_refine
-
     if is_distill:
         pipe.dit.enable_loras(['cfg_step_lora'])
         num_inference_steps = 16
         guidance_scale = 1.0
-        current_neg_prompt = ""
+        neg = ""
     else:
         num_inference_steps = 50
         guidance_scale = 4.0
-        current_neg_prompt = neg_prompt
+        neg = neg_prompt
 
     if mode == "t2v":
         output = pipe.generate_t2v(
             prompt=prompt,
-            negative_prompt=current_neg_prompt,
+            negative_prompt=neg,
             height=height,
             width=width,
             num_frames=num_frames,
@@ -146,7 +150,7 @@
         output = pipe.generate_i2v(
             image=pil_image,
             prompt=prompt,
-            negative_prompt=current_neg_prompt,
+            negative_prompt=neg,
             resolution=resolution,
             num_frames=num_frames,
             num_inference_steps=num_inference_steps,
@@ -157,80 +161,87 @@
 
     if is_distill:
         pipe.dit.disable_all_loras()
+
     torch_gc()
 
     if use_refine:
-        progress(0.5, desc="Refinement")
+        progress(0.5, desc="Refining")
         pipe.dit.enable_loras(['refinement_lora'])
         pipe.dit.enable_bsa()
-        stage1_video_pil = [(frame * 255).astype(np.uint8) for frame in output]
-        stage1_video_pil = [Image.fromarray(img) for img in stage1_video_pil]
-        refine_image = Image.fromarray(image) if mode == 'i2v' else None
+
+        frames = [(frame * 255).astype(np.uint8) for frame in output]
+        frames = [Image.fromarray(f) for f in frames]
+        ref_img = Image.fromarray(image) if mode == "i2v" else None
+
         output = pipe.generate_refine(
-            image=refine_image,
+            image=ref_img,
             prompt=prompt,
-            stage1_video=stage1_video_pil,
-            num_cond_frames=1 if mode == 'i2v' else 0,
+            stage1_video=frames,
+            num_cond_frames=1 if mode == "i2v" else 0,
             num_inference_steps=50,
             generator=generator,
         )[0]
+
         pipe.dit.disable_all_loras()
         pipe.dit.disable_bsa()
         torch_gc()
 
-    progress(1.0, desc="Exporting video")
-    print("video generated")
-    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_video_file:
-        export_to_video(output, temp_video_file.name, fps=fps)
-        return temp_video_file.name
+    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
+        export_to_video(output, tmp.name, fps=30)
+        return tmp.name
 
-# --- Gradio UI ---
-css = '.fillable{max-width: 960px !important}'
+
+# -------------------- Gradio UI --------------------
+css = ".fillable{max-width:960px !important}"
 with gr.Blocks(css=css) as demo:
     gr.Markdown("# 🎬 LongCat-Video")
     gr.Markdown("13.6B parameter dense video-generation model — [HuggingFace](https://huggingface.co/meituan-longcat/LongCat-Video)")
 
-    with gr.Tabs() as tabs:
+    with gr.Tabs():
+        # --- T2V ---
         with gr.TabItem("Text-to-Video"):
             mode_t2v = gr.State("t2v")
             prompt_t2v = gr.Textbox(label="Prompt", lines=4)
-            neg_prompt_t2v = gr.Textbox(label="Negative Prompt", lines=2, value="ugly, blurry, low quality, static, subtitles")
-            height_t2v = gr.Slider(256, 1024, step=64, value=480, label="Height")
-            width_t2v = gr.Slider(256, 1024, step=64, value=832, label="Width")
-            seed_t2v = gr.Number(label="Seed", value=42, precision=0)
+            neg_t2v = gr.Textbox(label="Negative Prompt", lines=2, value="ugly, blurry, low quality, static, subtitles")
+            height_t2v = gr.Slider(256, 1024, value=480, step=64, label="Height")
+            width_t2v = gr.Slider(256, 1024, value=832, step=64, label="Width")
+            seed_t2v = gr.Number(label="Seed", value=42)
             distill_t2v = gr.Checkbox(label="Use Distill Mode", value=True)
             refine_t2v = gr.Checkbox(label="Use Refine Mode", value=False)
-            duration_t2v = gr.Slider(1, 20, step=1, value=2, label="Video Duration (seconds)")
+            duration_t2v = gr.Slider(1, 20, step=1, value=2, label="Duration (seconds)")  # ✅ added
+
             t2v_button = gr.Button("Generate Video")
-            video_output_t2v = gr.Video(label="Generated Video", interactive=False)
+            video_out_t2v = gr.Video(label="Generated Video")
 
             t2v_button.click(
                 fn=generate_video,
-                inputs=[mode_t2v, prompt_t2v, neg_prompt_t2v, gr.State(None),
+                inputs=[mode_t2v, prompt_t2v, neg_t2v, gr.State(None),
                         height_t2v, width_t2v, gr.State(None),
                         seed_t2v, distill_t2v, refine_t2v, duration_t2v],
-                outputs=video_output_t2v
+                outputs=video_out_t2v
             )
 
+        # --- I2V ---
         with gr.TabItem("Image-to-Video"):
             mode_i2v = gr.State("i2v")
             image_i2v = gr.Image(type="numpy", label="Input Image")
             prompt_i2v = gr.Textbox(label="Prompt", lines=4)
-            neg_prompt_i2v = gr.Textbox(label="Negative Prompt", lines=2, value="ugly, blurry, low quality, static, subtitles, watermark")
+            neg_i2v = gr.Textbox(label="Negative Prompt", lines=2, value="ugly, blurry, low quality, static, subtitles, watermark")
             resolution_i2v = gr.Dropdown(["480p", "720p"], value="480p", label="Resolution")
-            seed_i2v = gr.Number(label="Seed", value=42, precision=0)
+            seed_i2v = gr.Number(label="Seed", value=42)
             distill_i2v = gr.Checkbox(label="Use Distill Mode", value=True)
             refine_i2v = gr.Checkbox(label="Use Refine Mode", value=False)
-            duration_i2v = gr.Slider(1, 20, step=1, value=2, label="Video Duration (seconds)")
+            duration_i2v = gr.Slider(1, 20, step=1, value=2, label="Duration (seconds)")  # ✅ added
+
             i2v_button = gr.Button("Generate Video")
-            video_output_i2v = gr.Video(label="Generated Video", interactive=False)
+            video_out_i2v = gr.Video(label="Generated Video")
 
             i2v_button.click(
                 fn=generate_video,
-                inputs=[mode_i2v, prompt_i2v, neg_prompt_i2v, image_i2v,
+                inputs=[mode_i2v, prompt_i2v, neg_i2v, image_i2v,
                         gr.State(None), gr.State(None), resolution_i2v,
                         seed_i2v, distill_i2v, refine_i2v, duration_i2v],
-                outputs=video_output_i2v
+                outputs=video_out_i2v
             )
 
 if __name__ == "__main__":
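
Note on the dynamic GPU allocation above: `@spaces.GPU(duration=...)` accepts a callable, which ZeroGPU invokes with the same arguments as the decorated function and whose return value is the number of GPU seconds to request. Since Gradio passes the `inputs` list positionally, the keyword-only `duration_t2v` in `check_duration(*_args, duration_t2v=2, **_kwargs)` is only picked up when supplied by keyword; otherwise it stays at its default of 2. A minimal sketch of the pattern, assuming the parameter order of the `inputs` lists above — `estimate_gpu_seconds`, its signature, and the cost factor are illustrative, not code from this commit:

import gradio as gr
import spaces

# Sketch only: ZeroGPU calls the duration callback with the same arguments as
# the decorated function and treats the returned number as GPU seconds.
# Parameter order mirrors the Gradio `inputs` lists in this app; the name
# estimate_gpu_seconds and the 15 s/sec cost factor are assumptions.
def estimate_gpu_seconds(mode, prompt, neg_prompt, image, height, width,
                         resolution, seed, use_distill, use_refine,
                         duration_t2v=2, *_args, **_kwargs):
    cost_per_video_second = 15                   # assumed per-second GPU cost
    estimate = duration_t2v * cost_per_video_second
    return estimate * (2 if use_refine else 1)   # refinement pass doubles work

@spaces.GPU(duration=estimate_gpu_seconds)
def generate_video(mode, prompt, neg_prompt, image, height, width,
                   resolution, seed, use_distill, use_refine,
                   duration_t2v=2, progress=gr.Progress()):
    ...

Under these assumptions, a 10-second clip with refinement enabled would reserve 10 × 15 × 2 = 300 GPU seconds, while the committed `check_duration` requests `duration_t2v * 30` seconds regardless of refinement.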