Eueuiaa committed on
Commit
0d74cea
·
verified ·
1 Parent(s): fa6a0b1

Update api/ltx_server_refactored.py

Browse files
Files changed (1) hide show
  1. api/ltx_server_refactored.py +5 -16
api/ltx_server_refactored.py CHANGED
@@ -281,19 +281,15 @@ class VideoService:
281
  height_padded = ((height - 1) // 8 + 1) * 8
282
  width_padded = ((width - 1) // 8 + 1) * 8
283
  generator = torch.Generator(device=self.device).manual_seed(seed)
284
-
285
  downscale_factor = self.config.get("downscale_factor", 0.6666666)
286
  vae_scale_factor = self.pipeline.vae_scale_factor
287
-
288
  x_width = int(width_padded * downscale_factor)
289
  downscaled_width = x_width - (x_width % vae_scale_factor)
290
  x_height = int(height_padded * downscale_factor)
291
  downscaled_height = x_height - (x_height % vae_scale_factor)
292
-
293
  all_conditions = ltx_configs_override.get("conditioning_items", [])
294
-
295
  pipeline_kwargs = self.config.get("first_pass", {}).copy()
296
-
297
  if ltx_configs_override:
298
  print("[DEBUG] Sobrepondo configurações do LTX com valores da UI...")
299
  preset = ltx_configs_override.get("guidance_preset")
@@ -357,12 +353,8 @@ class VideoService:
357
  if num_chunks == 0: raise ValueError("O prompt está vazio ou não contém linhas válidas.")
358
 
359
  total_actual_frames = max(9, int(round((round(duration * FPS) - 1) / 8.0) * 8 + 1))
360
-
361
-
362
-
363
  frames_per_chunk = max(9, total_actual_frames)
364
  frames_per_chunk_last = max(9, total_actual_frames)
365
-
366
  poda_latents_num = overlap_frames
367
 
368
  latentes_chunk_video = []
@@ -390,13 +382,11 @@ class VideoService:
390
  if condition_item_latent_overlap: current_conditions.append(condition_item_latent_overlap)
391
  ltx_configs_override["conditioning_items"] = current_conditions
392
 
393
- num_frames_para_gerar = frames_per_chunk_last if i == num_chunks - 1 else frames_per_chunk
394
- if i > 0 and poda_latents_num > 0:
395
- num_frames_para_gerar += overlap_frames
396
 
397
  latentes_bruto = self._generate_single_chunk_low(
398
  prompt=chunk_prompt, negative_prompt=negative_prompt, height=height, width=width,
399
- num_frames=num_frames_para_gerar, seed=used_seed + i,
400
  ltx_configs_override=ltx_configs_override
401
  )
402
 
@@ -405,12 +395,11 @@ class VideoService:
405
  self.finalize(keep_paths=[])
406
  return None, None, None
407
 
408
- if i > 0 and poda_latents_num > 0:
409
- latentes_bruto = latentes_bruto[:, :, poda_latents_num:, :, :]
410
 
411
  latentes_podado = latentes_bruto.clone().detach()
412
  if i < num_chunks - 1 and poda_latents_num > 0:
413
- latentes_podado = latentes_bruto[:, :, :-poda_latents_num, :, :].clone()
414
  overlap_latents = latentes_bruto[:, :, -poda_latents_num:, :, :].clone()
415
  condition_item_latent_overlap = ConditioningItem(
416
  media_item=overlap_latents, media_frame_number=0, conditioning_strength=1.0
 
281
  height_padded = ((height - 1) // 8 + 1) * 8
282
  width_padded = ((width - 1) // 8 + 1) * 8
283
  generator = torch.Generator(device=self.device).manual_seed(seed)
 
284
  downscale_factor = self.config.get("downscale_factor", 0.6666666)
285
  vae_scale_factor = self.pipeline.vae_scale_factor
 
286
  x_width = int(width_padded * downscale_factor)
287
  downscaled_width = x_width - (x_width % vae_scale_factor)
288
  x_height = int(height_padded * downscale_factor)
289
  downscaled_height = x_height - (x_height % vae_scale_factor)
290
+
291
  all_conditions = ltx_configs_override.get("conditioning_items", [])
 
292
  pipeline_kwargs = self.config.get("first_pass", {}).copy()
 
293
  if ltx_configs_override:
294
  print("[DEBUG] Sobrepondo configurações do LTX com valores da UI...")
295
  preset = ltx_configs_override.get("guidance_preset")
 
353
  if num_chunks == 0: raise ValueError("O prompt está vazio ou não contém linhas válidas.")
354
 
355
  total_actual_frames = max(9, int(round((round(duration * FPS) - 1) / 8.0) * 8 + 1))
 
 
 
356
  frames_per_chunk = max(9, total_actual_frames)
357
  frames_per_chunk_last = max(9, total_actual_frames)
 
358
  poda_latents_num = overlap_frames
359
 
360
  latentes_chunk_video = []
 
382
  if condition_item_latent_overlap: current_conditions.append(condition_item_latent_overlap)
383
  ltx_configs_override["conditioning_items"] = current_conditions
384
 
385
+ num_frames_para_gerar = frames_per_chunk_last
 
 
386
 
387
  latentes_bruto = self._generate_single_chunk_low(
388
  prompt=chunk_prompt, negative_prompt=negative_prompt, height=height, width=width,
389
+ num_frames=num_frames_para_gerar, seed=used_seed,
390
  ltx_configs_override=ltx_configs_override
391
  )
392
 
 
395
  self.finalize(keep_paths=[])
396
  return None, None, None
397
 
398
+ if i > 0:
399
+ latentes_bruto = latentes_bruto[:, :, poda_latents_num:, :, :]
400
 
401
  latentes_podado = latentes_bruto.clone().detach()
402
  if i < num_chunks - 1 and poda_latents_num > 0:
 
403
  overlap_latents = latentes_bruto[:, :, -poda_latents_num:, :, :].clone()
404
  condition_item_latent_overlap = ConditioningItem(
405
  media_item=overlap_latents, media_frame_number=0, conditioning_strength=1.0