Carlos s committed on
Commit
690fc1d
verified
1 Parent(s): 2e1e83b

Update api/ltx_server.py

Browse files
Files changed (1) hide show
  1. api/ltx_server.py +8 -8
api/ltx_server.py CHANGED
@@ -60,9 +60,9 @@ def _query_gpu_processes_via_nvidiasmi(device_index: int) -> List[Dict]:
60
  parts = [p.strip() for p in line.split(",")]
61
  if len(parts) >= 3:
62
  try:
63
- pid = int(parts[^20_0])
64
- name = parts[^20_1]
65
- used_mb = int(parts[^20_2])
66
  user = "unknown"
67
  try:
68
  import psutil
@@ -351,7 +351,7 @@ class VideoService:
351
  padding_values, progress_callback=None):
352
  pad_left, pad_right, pad_top, pad_bottom = padding_values
353
  with imageio.get_writer(output_video_path, fps=frame_rate, codec="libx264", quality=8) as writer:
354
- T = latents.shape[^20_2]
355
  for i in range(T):
356
  latent_chw = latents[0, :, i].to(self.device)
357
  with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext():
@@ -362,13 +362,13 @@ class VideoService:
362
  pixel_bchw = self.pipeline.vae.decode(latent_chw.unsqueeze(0))
363
  else:
364
  raise RuntimeError("Pipeline não expõe decode_latents nem vae.decode para decodificar latentes.")
365
- pixel_chw = pixel_bchw[^20_0]
366
  if pixel_chw.min() < 0:
367
  pixel_chw = (pixel_chw.clamp(-1, 1) + 1.0) / 2.0
368
  else:
369
  pixel_chw = pixel_chw.clamp(0, 1)
370
- H = pixel_chw.shape[^20_1]
371
- W = pixel_chw.shape[^20_2]
372
  h_end = H - pad_bottom if pad_bottom > 0 else H
373
  w_end = W - pad_right if pad_right > 0 else W
374
  pixel_chw = pixel_chw[:, pad_top:h_end, pad_left:w_end]
@@ -566,7 +566,7 @@ class VideoService:
566
  result_tensor = result_tensor[:, :, :actual_num_frames, pad_top:slice_h_end, pad_left:slice_w_end]
567
  log_tensor_info(result_tensor, "Tensor Final (Após Pós-processamento, Antes de Salvar)")
568
  with imageio.get_writer(output_video_path, fps=call_kwargs["frame_rate"], codec="libx264", quality=8) as writer:
569
- T = result_tensor.shape[^20_2]
570
  for i in range(T):
571
  frame_chw = result_tensor[0, :, i]
572
  frame_hwc_u8 = (frame_chw.permute(1, 2, 0)
 
60
  parts = [p.strip() for p in line.split(",")]
61
  if len(parts) >= 3:
62
  try:
63
+ pid = int(parts[0])
64
+ name = parts[1]
65
+ used_mb = int(parts[2])
66
  user = "unknown"
67
  try:
68
  import psutil
 
351
  padding_values, progress_callback=None):
352
  pad_left, pad_right, pad_top, pad_bottom = padding_values
353
  with imageio.get_writer(output_video_path, fps=frame_rate, codec="libx264", quality=8) as writer:
354
+ T = latents.shape[2]
355
  for i in range(T):
356
  latent_chw = latents[0, :, i].to(self.device)
357
  with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext():
 
362
  pixel_bchw = self.pipeline.vae.decode(latent_chw.unsqueeze(0))
363
  else:
364
  raise RuntimeError("Pipeline não expõe decode_latents nem vae.decode para decodificar latentes.")
365
+ pixel_chw = pixel_bchw[0]
366
  if pixel_chw.min() < 0:
367
  pixel_chw = (pixel_chw.clamp(-1, 1) + 1.0) / 2.0
368
  else:
369
  pixel_chw = pixel_chw.clamp(0, 1)
370
+ H = pixel_chw.shape[1]
371
+ W = pixel_chw.shape[2]
372
  h_end = H - pad_bottom if pad_bottom > 0 else H
373
  w_end = W - pad_right if pad_right > 0 else W
374
  pixel_chw = pixel_chw[:, pad_top:h_end, pad_left:w_end]
 
566
  result_tensor = result_tensor[:, :, :actual_num_frames, pad_top:slice_h_end, pad_left:slice_w_end]
567
  log_tensor_info(result_tensor, "Tensor Final (Após Pós-processamento, Antes de Salvar)")
568
  with imageio.get_writer(output_video_path, fps=call_kwargs["frame_rate"], codec="libx264", quality=8) as writer:
569
+ T = result_tensor.shape[2]
570
  for i in range(T):
571
  frame_chw = result_tensor[0, :, i]
572
  frame_hwc_u8 = (frame_chw.permute(1, 2, 0)