eeuuia committed on
Commit
9b8b3b0
·
verified ·
1 Parent(s): 28859c6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -17
app.py CHANGED
@@ -53,7 +53,6 @@ except Exception as e:
53
  def run_generate_base_video(
54
  generation_mode: str, prompt: str, neg_prompt: str, start_img: str,
55
  height: int, width: int, duration: float,
56
- fp_guidance_preset: str, fp_guidance_scale_list: str, fp_stg_scale_list: str,
57
  fp_num_inference_steps: int, fp_skip_initial_steps: int, fp_skip_final_steps: int,
58
  progress=gr.Progress(track_tqdm=True)
59
  ) -> tuple:
@@ -70,9 +69,6 @@ def run_generate_base_video(
70
  )
71
 
72
  ltx_configs = {
73
- "guidance_preset": fp_guidance_preset,
74
- "guidance_scale_list": fp_guidance_scale_list,
75
- "stg_scale_list": fp_stg_scale_list,
76
  "num_inference_steps": fp_num_inference_steps,
77
  "skip_initial_inference_steps": fp_skip_initial_steps,
78
  "skip_final_inference_steps": fp_skip_final_steps,
@@ -173,16 +169,15 @@ def build_ui():
173
  def _build_generation_controls(ui: dict):
174
  """Constrói os componentes da UI para a Etapa 1: Geração."""
175
  gr.Markdown("### Configurações de Geração")
176
- ui['generation_mode'] = gr.Radio(label="Modo de Geração", choices=["Simples (Prompt Único)", "Narrativa (Múltiplos Prompts)"], value="Narrativa (Múltiplos Prompts)", info="Simples para uma ação contínua, Narrativa para uma sequência (uma cena por linha).")
177
  ui['prompt'] = gr.Textbox(label="Prompt(s)", value="Um leão majestoso caminha pela savana\nEle sobe em uma grande pedra e olha o horizonte", lines=4)
178
- ui['neg_prompt'] = gr.Textbox(label="Negative Prompt", value="blurry, low quality, bad anatomy, deformed", lines=2)
179
  ui['start_image'] = gr.Image(label="Imagem de Início (Opcional)", type="filepath", sources=["upload"])
180
 
181
  with gr.Accordion("Parâmetros Principais", open=True):
182
  ui['duration'] = gr.Slider(label="Duração Total (s)", value=4, step=1, minimum=1, maximum=30)
183
  with gr.Row():
184
- ui['height'] = gr.Slider(label="Height", value=432, step=16, minimum=256, maximum=1024)
185
- ui['width'] = gr.Slider(label="Width", value=768, step=16, minimum=256, maximum=1024)
186
 
187
  with gr.Accordion("Opções Avançadas LTX", open=False):
188
  gr.Markdown("#### Configurações de Passos de Inferência (First Pass)")
@@ -191,14 +186,7 @@ def _build_generation_controls(ui: dict):
191
  ui['fp_skip_initial_steps'] = gr.Slider(label="Pular Passos Iniciais", minimum=0, maximum=100, step=1, value=0)
192
  ui['fp_skip_final_steps'] = gr.Slider(label="Pular Passos Finais", minimum=0, maximum=100, step=1, value=0)
193
 
194
- with gr.Tabs():
195
- with gr.TabItem("Configurações de Guiagem (First Pass)"):
196
- ui['fp_guidance_preset'] = gr.Dropdown(label="Preset de Guiagem", choices=["Padrão (Recomendado)", "Agressivo", "Suave", "Customizado"], value="Padrão (Recomendado)", info="Controla o comportamento da guiagem durante a difusão.")
197
- with gr.Group(visible=False) as ui['custom_guidance_group']:
198
- gr.Markdown("⚠️ Edite as listas em formato JSON. Ex: `[1.0, 2.5, 3.0]`")
199
- ui['fp_guidance_scale_list'] = gr.Textbox(label="Lista de Guidance Scale", value="[1, 1, 6, 8, 6, 1, 1]")
200
- ui['fp_stg_scale_list'] = gr.Textbox(label="Lista de STG Scale (Movimento)", value="[0, 0, 4, 4, 4, 2, 1]")
201
-
202
  ui['generate_low_btn'] = gr.Button("1. Gerar Vídeo Base", variant="primary")
203
 
204
  def _build_postprod_controls(ui: dict):
@@ -244,7 +232,6 @@ def _register_event_handlers(app_state: gr.State, ui: dict):
244
  gen_inputs = [
245
  ui['generation_mode'], ui['prompt'], ui['neg_prompt'], ui['start_image'],
246
  ui['height'], ui['width'], ui['duration'],
247
- ui['fp_guidance_preset'], ui['fp_guidance_scale_list'], ui['fp_stg_scale_list'],
248
  ui['fp_num_inference_steps'], ui['fp_skip_initial_steps'], ui['fp_skip_final_steps'],
249
  ]
250
  gen_outputs = [ui['low_res_video_output'], app_state, ui['post_prod_group']]
 
53
  def run_generate_base_video(
54
  generation_mode: str, prompt: str, neg_prompt: str, start_img: str,
55
  height: int, width: int, duration: float,
 
56
  fp_num_inference_steps: int, fp_skip_initial_steps: int, fp_skip_final_steps: int,
57
  progress=gr.Progress(track_tqdm=True)
58
  ) -> tuple:
 
69
  )
70
 
71
  ltx_configs = {
 
 
 
72
  "num_inference_steps": fp_num_inference_steps,
73
  "skip_initial_inference_steps": fp_skip_initial_steps,
74
  "skip_final_inference_steps": fp_skip_final_steps,
 
169
  def _build_generation_controls(ui: dict):
170
  """Constrói os componentes da UI para a Etapa 1: Geração."""
171
  gr.Markdown("### Configurações de Geração")
 
172
  ui['prompt'] = gr.Textbox(label="Prompt(s)", value="Um leão majestoso caminha pela savana\nEle sobe em uma grande pedra e olha o horizonte", lines=4)
173
+ ui['neg_prompt'] = gr.Textbox(visible=False, label="Negative Prompt", value="blurry, low quality, bad anatomy, deformed", lines=2)
174
  ui['start_image'] = gr.Image(label="Imagem de Início (Opcional)", type="filepath", sources=["upload"])
175
 
176
  with gr.Accordion("Parâmetros Principais", open=True):
177
  ui['duration'] = gr.Slider(label="Duração Total (s)", value=4, step=1, minimum=1, maximum=30)
178
  with gr.Row():
179
+ ui['height'] = gr.Slider(label="Height", value=480, step=32, minimum=256, maximum=1024)
180
+ ui['width'] = gr.Slider(label="Width", value=720, step=32, minimum=256, maximum=1024)
181
 
182
  with gr.Accordion("Opções Avançadas LTX", open=False):
183
  gr.Markdown("#### Configurações de Passos de Inferência (First Pass)")
 
186
  ui['fp_skip_initial_steps'] = gr.Slider(label="Pular Passos Iniciais", minimum=0, maximum=100, step=1, value=0)
187
  ui['fp_skip_final_steps'] = gr.Slider(label="Pular Passos Finais", minimum=0, maximum=100, step=1, value=0)
188
 
189
+
 
 
 
 
 
 
 
190
  ui['generate_low_btn'] = gr.Button("1. Gerar Vídeo Base", variant="primary")
191
 
192
  def _build_postprod_controls(ui: dict):
 
232
  gen_inputs = [
233
  ui['generation_mode'], ui['prompt'], ui['neg_prompt'], ui['start_image'],
234
  ui['height'], ui['width'], ui['duration'],
 
235
  ui['fp_num_inference_steps'], ui['fp_skip_initial_steps'], ui['fp_skip_final_steps'],
236
  ]
237
  gen_outputs = [ui['low_res_video_output'], app_state, ui['post_prod_group']]