Eueuiaa committed on
Commit
465c791
·
verified ·
1 Parent(s): 320a8a3

Update api/ltx_server_refactored.py

Browse files
Files changed (1) hide show
  1. api/ltx_server_refactored.py +43 -8
api/ltx_server_refactored.py CHANGED
@@ -270,7 +270,12 @@ class VideoService:
270
  # ==============================================================================
271
  # --- FUNÇÃO #1: GERADOR DE CHUNK ÚNICO (AUXILIAR INTERNA) ---
272
  # ==============================================================================
273
- def _generate_single_chunk_low(self, prompt, negative_prompt, height, width, num_frames, guidance_scale, seed, initial_latent_condition=None, image_conditions=None, ltx_configs_override=None):
 
 
 
 
 
274
  """
275
  [NÓ DE GERAÇÃO]
276
  Gera um ÚNICO chunk de latentes brutos. Esta é a unidade de trabalho fundamental.
@@ -304,7 +309,7 @@ class VideoService:
304
  try:
305
  first_pass_config["guidance_scale"] = json.loads(ltx_configs_override["guidance_scale_list"])
306
  first_pass_config["stg_scale"] = json.loads(ltx_configs_override["stg_scale_list"])
307
- first_pass_config["guidance_timesteps"] = json.loads(ltx_configs_override["timesteps_list"])
308
  except Exception as e:
309
  print(f" > ERRO ao parsear valores customizados: {e}. Usando Padrão como fallback.")
310
  elif preset == "Agressivo":
@@ -314,9 +319,21 @@ class VideoService:
314
  first_pass_config["guidance_scale"] = [1, 1, 4, 5, 4, 1, 1]
315
  first_pass_config["stg_scale"] = [0, 0, 2, 2, 2, 1, 0]
316
 
317
- if "first_pass_num_inference_steps" in ltx_configs_override:
 
 
318
  first_pass_config["num_inference_steps"] = ltx_configs_override["first_pass_num_inference_steps"]
319
-
 
 
 
 
 
 
 
 
 
 
320
  first_pass_kwargs = {
321
  "prompt": prompt, "negative_prompt": negative_prompt, "height": downscaled_height, "width": downscaled_width,
322
  "num_frames": num_frames, "frame_rate": 24, "generator": generator, "output_type": "latent",
@@ -335,7 +352,12 @@ class VideoService:
335
  # ==============================================================================
336
  # --- FUNÇÃO #2: ORQUESTRADOR NARRATIVO (MÚLTIPLOS PROMPTS) ---
337
  # ==============================================================================
338
- def generate_narrative_low(self, prompt: str, negative_prompt, height, width, duration, guidance_scale, seed, initial_image_conditions=None, overlap_frames: int = 8, ltx_configs_override: dict = None):
 
 
 
 
 
339
  """
340
  [ORQUESTRADOR NARRATIVO]
341
  Gera um vídeo em múltiplos chunks sequenciais a partir de um prompt com várias linhas.
@@ -353,6 +375,7 @@ class VideoService:
353
  if num_chunks == 0: raise ValueError("O prompt está vazio ou não contém linhas válidas.")
354
 
355
  total_actual_frames = max(9, int(round((round(duration * FPS) - 1) / 8.0) * 8 + 1))
 
356
 
357
  if num_chunks > 1:
358
  total_blocks = (total_actual_frames - 1) // 8
@@ -396,6 +419,7 @@ class VideoService:
396
  prompt=chunk_prompt, negative_prompt=negative_prompt, height=height, width=width,
397
  num_frames=num_frames_para_gerar, guidance_scale=guidance_scale, seed=used_seed + i,
398
  initial_latent_condition=condition_item_latent_overlap, image_conditions=current_image_conditions,
 
399
  ltx_configs_override=ltx_configs_override
400
  )
401
 
@@ -428,7 +452,12 @@ class VideoService:
428
  # ==============================================================================
429
  # --- FUNÇÃO #3: ORQUESTRADOR SIMPLES (PROMPT ÚNICO) ---
430
  # ==============================================================================
431
- def generate_single_low(self, prompt: str, negative_prompt, height, width, duration, guidance_scale, seed, initial_image_conditions=None, ltx_configs_override: dict = None):
 
 
 
 
 
432
  """
433
  [ORQUESTRADOR SIMPLES]
434
  Gera um vídeo completo em um único chunk. Ideal para prompts simples e curtos.
@@ -450,7 +479,9 @@ class VideoService:
450
  final_latents = self._generate_single_chunk_low(
451
  prompt=prompt, negative_prompt=negative_prompt, height=height, width=width,
452
  num_frames=total_actual_frames, guidance_scale=guidance_scale, seed=used_seed,
453
- image_conditions=initial_image_conditions, ltx_configs_override=ltx_configs_override
 
 
454
  )
455
 
456
  print("\n--- Finalizando Geração Simples: Salvando e decodificando ---")
@@ -469,7 +500,11 @@ class VideoService:
469
  # ==============================================================================
470
  # --- FUNÇÃO #4: ORQUESTRADOR (Upscaler + texturas hd) ---
471
  # ==============================================================================
472
- def generate_upscale_denoise(self, latents_path, prompt, negative_prompt, guidance_scale, seed):
 
 
 
 
473
  used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
474
  seed_everething(used_seed)
475
  temp_dir = tempfile.mkdtemp(prefix="ltxv_up_"); self._register_tmp_dir(temp_dir)
 
270
  # ==============================================================================
271
  # --- FUNÇÃO #1: GERADOR DE CHUNK ÚNICO (AUXILIAR INTERNA) ---
272
  # ==============================================================================
273
+ def _generate_single_chunk_low(
274
+ self, prompt, negative_prompt,
275
+ height, width, num_frames, guidance_scale,
276
+ seed, initial_latent_condition=None, image_conditions=None,
277
+ fp_num_inference_steps:int=30, ship_initial_inference_steps:int=0, ship_final_inference_steps:int=0,
278
+ ltx_configs_override=None):
279
  """
280
  [NÓ DE GERAÇÃO]
281
  Gera um ÚNICO chunk de latentes brutos. Esta é a unidade de trabalho fundamental.
 
309
  try:
310
  first_pass_config["guidance_scale"] = json.loads(ltx_configs_override["guidance_scale_list"])
311
  first_pass_config["stg_scale"] = json.loads(ltx_configs_override["stg_scale_list"])
312
+ #first_pass_config["guidance_timesteps"] = json.loads(ltx_configs_override["timesteps_list"])
313
  except Exception as e:
314
  print(f" > ERRO ao parsear valores customizados: {e}. Usando Padrão como fallback.")
315
  elif preset == "Agressivo":
 
319
  first_pass_config["guidance_scale"] = [1, 1, 4, 5, 4, 1, 1]
320
  first_pass_config["stg_scale"] = [0, 0, 2, 2, 2, 1, 0]
321
 
322
+ if fp_num_inference_steps>0:
323
+ first_pass_config["num_inference_steps"] = fp_num_inference_steps
324
+ else:
325
  first_pass_config["num_inference_steps"] = ltx_configs_override["first_pass_num_inference_steps"]
326
+
327
+ if ship_initial_inference_steps>0:
328
+ first_pass_config["skip_initial_inference_steps"] = ship_initial_inference_steps
329
+ else:
330
+ first_pass_config["skip_initial_inference_steps"] = ltx_configs_override["skip_initial_inference_steps"]
331
+
332
+ if ship_final_inference_steps>0:
333
+ first_pass_config["skip_final_inference_steps"] = ship_final_inference_steps
334
+ else:
335
+ first_pass_config["skip_final_inference_steps"] = ltx_configs_override["skip_final_inference_steps"]
336
+
337
  first_pass_kwargs = {
338
  "prompt": prompt, "negative_prompt": negative_prompt, "height": downscaled_height, "width": downscaled_width,
339
  "num_frames": num_frames, "frame_rate": 24, "generator": generator, "output_type": "latent",
 
352
  # ==============================================================================
353
  # --- FUNÇÃO #2: ORQUESTRADOR NARRATIVO (MÚLTIPLOS PROMPTS) ---
354
  # ==============================================================================
355
+ def generate_narrative_low(
356
+ self, prompt: str, negative_prompt,
357
+ height, width, duration, guidance_scale,
358
+ seed, initial_image_conditions=None, overlap_frames: int = 8,
359
+ fp_num_inference_steps:int=30, ship_initial_inference_steps:int=0, ship_final_inference_steps:int=0,
360
+ ltx_configs_override: dict = None):
361
  """
362
  [ORQUESTRADOR NARRATIVO]
363
  Gera um vídeo em múltiplos chunks sequenciais a partir de um prompt com várias linhas.
 
375
  if num_chunks == 0: raise ValueError("O prompt está vazio ou não contém linhas válidas.")
376
 
377
  total_actual_frames = max(9, int(round((round(duration * FPS) - 1) / 8.0) * 8 + 1))
378
+
379
 
380
  if num_chunks > 1:
381
  total_blocks = (total_actual_frames - 1) // 8
 
419
  prompt=chunk_prompt, negative_prompt=negative_prompt, height=height, width=width,
420
  num_frames=num_frames_para_gerar, guidance_scale=guidance_scale, seed=used_seed + i,
421
  initial_latent_condition=condition_item_latent_overlap, image_conditions=current_image_conditions,
422
+ fp_num_inference_steps=fp_num_inference_steps, ship_initial_inference_steps=ship_initial_inference_steps, ship_final_inference_steps=ship_final_inference_steps,
423
  ltx_configs_override=ltx_configs_override
424
  )
425
 
 
452
  # ==============================================================================
453
  # --- FUNÇÃO #3: ORQUESTRADOR SIMPLES (PROMPT ÚNICO) ---
454
  # ==============================================================================
455
+ def generate_single_low(
456
+ self, prompt: str, negative_prompt,
457
+ height, width, duration, guidance_scale,
458
+ seed, initial_image_conditions=None,
459
+ fp_num_inference_steps:int=30, ship_initial_inference_steps:int=0, ship_final_inference_steps:int=0,
460
+ ltx_configs_override: dict = None):
461
  """
462
  [ORQUESTRADOR SIMPLES]
463
  Gera um vídeo completo em um único chunk. Ideal para prompts simples e curtos.
 
479
  final_latents = self._generate_single_chunk_low(
480
  prompt=prompt, negative_prompt=negative_prompt, height=height, width=width,
481
  num_frames=total_actual_frames, guidance_scale=guidance_scale, seed=used_seed,
482
+ image_conditions=initial_image_conditions,
483
+ fp_num_inference_steps=fp_num_inference_steps, ship_initial_inference_steps=ship_initial_inference_steps, ship_final_inference_steps=ship_final_inference_steps,
484
+ ltx_configs_override=ltx_configs_override
485
  )
486
 
487
  print("\n--- Finalizando Geração Simples: Salvando e decodificando ---")
 
500
  # ==============================================================================
501
  # --- FUNÇÃO #4: ORQUESTRADOR (Upscaler + texturas hd) ---
502
  # ==============================================================================
503
+ def generate_upscale_denoise(
504
+ self, latents_path, prompt, negative_prompt,
505
+ guidance_scale, seed,
506
+ fp_num_inference_steps:int=30, ship_initial_inference_steps:int=0, ship_final_inference_steps:int=0
507
+ ):
508
  used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
509
  seed_everething(used_seed)
510
  temp_dir = tempfile.mkdtemp(prefix="ltxv_up_"); self._register_tmp_dir(temp_dir)