Shalmoni committed on
Commit 184ddd2 · verified · 1 Parent(s): a4f9434

Update app.py

Files changed (1)
  1. app.py +132 -60
app.py CHANGED
@@ -41,8 +41,8 @@ def ensure_project(p, suggested_name="Project"):
     name = f"{suggested_name}-{pid[:4]}"
     proj = {
         "meta": {"id": pid, "name": name, "created": now_iso(), "updated": now_iso()},
-        "shots": [],  # each shot: id,title,description,duration,fps,steps,seed,negative,image_path?
-        "clips": []
+        "shots": [],  # each shot: id,title,description,duration,fps,steps,seed,negative,image_path
+        "clips": [],
     }
     save_project(proj)
     return proj
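
For reference, one record in `proj["shots"]` following the comment's schema would look like this (an illustrative sketch; the field values are made up, not taken from the commit):

```python
# Hypothetical shot record matching the schema noted above.
example_shot = {
    "id": 1,
    "title": "Opening alley",
    "description": "rain-soaked alley at night, neon signs, low camera angle",
    "duration": 4.0,      # seconds
    "fps": 12,
    "steps": 30,          # diffusion steps
    "seed": None,         # filled in when the project seed is locked
    "negative": "",       # negative prompt
    "image_path": None,   # set once a keyframe is approved
}
```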
@@ -65,19 +65,17 @@ def _lazy_model_tok():
 
     _tokenizer = AutoTokenizer.from_pretrained(STORYBOARD_MODEL, trust_remote_code=True)
 
-    # Choose a dtype that works both locally and on ZeroGPU
     use_cuda = torch.cuda.is_available()
-    preferred_dtype = torch.float16 if use_cuda else torch.float32  # torch.bfloat16 is also fine if supported
+    preferred_dtype = torch.float16 if use_cuda else torch.float32
 
     _model = AutoModelForCausalLM.from_pretrained(
         STORYBOARD_MODEL,
         device_map="auto",
-        torch_dtype=preferred_dtype,  # ✅ FIXED: use torch_dtype
+        torch_dtype=preferred_dtype,  # <- correct kwarg
         trust_remote_code=True,
         use_safetensors=True
     )
 
-    # Ensure pad token to avoid warnings
     if _tokenizer.pad_token_id is None and _tokenizer.eos_token_id is not None:
         _tokenizer.pad_token_id = _tokenizer.eos_token_id
 
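The removed comment noted that bfloat16 would also work where supported. A dtype probe along those lines could look like this (a sketch, not part of the commit; `torch.cuda.is_bf16_supported()` is a standard torch call):

```python
import torch

def pick_dtype() -> torch.dtype:
    # Prefer bf16 on GPUs that support it, fall back to fp16 on CUDA, fp32 on CPU.
    if torch.cuda.is_available():
        return torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
    return torch.float32
```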
@@ -115,7 +113,7 @@ def _prompt_minimal(user_prompt: str, n_shots: int, default_fps: int, default_le
         f" \"fps\": {default_fps},\n"
         " \"steps\": 30,\n"
         " \"seed\": null,\n"
-        ' \"negative\": \"\"\n'
+        " \"negative\": \"\"\n"
         "}\n"
     )
 
@@ -130,7 +128,6 @@ def _apply_chat(tok, system_msg: str, user_msg: str) -> str:
     return system_msg + "\n\n" + user_msg
 
 def _generate_text(model, tok, prompt_text: str) -> str:
-    """Decode only the continuation (avoid prompt echo)."""
     inputs = tok(prompt_text, return_tensors="pt")
     inputs = {k: v.to(model.device) for k, v in inputs.items()}
     eos_id = tok.eos_token_id or tok.pad_token_id
@@ -192,13 +189,11 @@ def generate_storyboard_with_llm(user_prompt: str, n_shots: int, default_fps: in
     model, tok = _lazy_model_tok()
     system = "You are a film previsualization assistant. Output must be valid JSON."
 
-    # Pass 1
     p1 = _apply_chat(tok, system + " Return ONLY JSON inside <JSON> tags.",
                      _prompt_with_tags(user_prompt, n_shots, default_fps, default_len))
     out1 = _generate_text(model, tok, p1)
     json_text = _extract_json_array(out1)
 
-    # Pass 2
     if not json_text:
         p2 = _apply_chat(tok, system + " Reply ONLY with a JSON array.",
                          _prompt_minimal(user_prompt, n_shots, default_fps, default_len))
@@ -209,7 +204,6 @@ def generate_storyboard_with_llm(user_prompt: str, n_shots: int, default_fps: in
         if start != -1 and end != -1 and end > start:
             json_text = out2[start:end+1].strip()
 
-    # Empty fallback
     if not json_text or not json_text.strip():
         fallback = []
         for i in range(1, int(n_shots) + 1):
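The second pass above rescues a reply by slicing from the first `[` to the last `]`. As a standalone helper the same idea reads as follows (a sketch; `_extract_json_array` itself is not shown in this diff):

```python
import json

def slice_json_array(text: str):
    """Best-effort: grab the outermost [...] span and parse it, else return None."""
    start, end = text.find("["), text.rfind("]")
    if start == -1 or end == -1 or end <= start:
        return None
    try:
        return json.loads(text[start:end + 1])
    except json.JSONDecodeError:
        return None

# slice_json_array('Sure! Here it is: [{"id": 1}]')  ->  [{"id": 1}]
```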
@@ -235,9 +229,8 @@ def generate_storyboard_with_llm(user_prompt: str, n_shots: int, default_fps: in
     return _normalize_shots(shots_raw, default_fps, default_len)
 
 # =========================
-# IMAGE GEN (ZeroGPU) — SD1.5 text2img + img2img chaining
+# IMAGE GEN (ZeroGPU) — sd-turbo t2i + img2img chaining
 # =========================
-
 from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
 
 SD_MODEL = os.getenv("SD_MODEL", "stabilityai/sd-turbo")
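Because `SD_MODEL` is read from the environment once at import time, the checkpoint can be swapped without touching the code (a sketch; the alternative model id is only an example):

```python
import os

# Must be set before app.py is imported (e.g. in Space variables or the shell).
os.environ.setdefault("SD_MODEL", "stabilityai/sd-turbo")    # the default above
# os.environ["SD_MODEL"] = "runwayml/stable-diffusion-v1-5"  # example alternative
```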
@@ -245,20 +238,13 @@ _sd_t2i = None
 _sd_i2i = None
 
 def _lazy_sd_pipes():
-    """
-    Load SD once in a version-safe way:
-    - torch_dtype (not dtype)
-    - low_cpu_mem_usage=False to avoid offload_state_dict kwarg
-    - no revision pin (some repos don't have 'fp16' branch)
-    - optional HF token if set (for gated models)
-    """
     global _sd_t2i, _sd_i2i
     if _sd_t2i is not None and _sd_i2i is not None:
         return _sd_t2i, _sd_i2i
 
     use_cuda = torch.cuda.is_available()
     dtype = torch.float16 if use_cuda else torch.float32
-    hf_token = os.getenv("HF_TOKEN", None)  # add this in Space Secrets only if needed
+    hf_token = os.getenv("HF_TOKEN", None)
 
     _sd_t2i = StableDiffusionPipeline.from_pretrained(
         SD_MODEL,
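Both pipelines here are loaded from the same checkpoint, so the weights end up in memory twice. A memory-saving variant would build the img2img pipeline from the text2img pipeline's components (a sketch using diffusers' documented `components` mapping; this is not what the commit does):

```python
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline

def lazy_pipes_shared(model_id: str, dtype):
    # Load text2img once, then reuse its UNet/VAE/text encoder for img2img.
    t2i = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype)
    i2i = StableDiffusionImg2ImgPipeline(**t2i.components)
    return t2i, i2i
```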
@@ -297,51 +283,75 @@ def generate_keyframe_image(
     pid: str,
     shot_idx: int,
     shots: list,
-    guidance_scale: float = 7.5,
-    strength: float = 0.35
+    t2i_steps: int = 6,  # first shot
+    i2i_steps: int = 10,  # subsequent shots
+    i2i_strength: float = 0.65,  # change vs consistency
+    guidance_scale: float = 0.5,
+    width: int = 512,
+    height: int = 512
 ):
     """
     Generate image for shots[shot_idx].
-    - shot 0: text2img
-    - shot k>0: img2img using previous approved image as conditioning (if available)
+    - shot 0: text2img (few steps)
+    - shot k>0: img2img from previous approved image with higher strength/steps
+    Seed is kept SAME across all shots (stored in shots[i]['seed']).
     """
     t2i, i2i = _lazy_sd_pipes()
     shot = shots[shot_idx]
-    prompt = shot.get("description", "")
+
+    prompt = (shot.get("description") or "").strip()
     negative = shot.get("negative") or ""
-    steps = int(shot.get("steps", 30))
-    seed = shot.get("seed", None)
+    seed = shot.get("seed", None)
 
-    gen = torch.Generator("cuda" if torch.cuda.is_available() else "cpu")
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    gen = torch.Generator(device)
     if isinstance(seed, int):
-        gen = gen.manual_seed(seed)
+        gen = gen.manual_seed(int(seed))
+
+    width = max(256, min(1024, int(width)))
+    height = max(256, min(1024, int(height)))
 
     if shot_idx == 0 or not shots[shot_idx - 1].get("image_path"):
         out = t2i(
             prompt=prompt,
             negative_prompt=negative,
             guidance_scale=guidance_scale,
-            num_inference_steps=steps,
-            generator=gen
+            num_inference_steps=int(max(1, t2i_steps)),
+            generator=gen,
+            width=width,
+            height=height
         ).images[0]
     else:
-        prev_path = shots[shot_idx - 1]["image_path"]
-        init_image = Image.open(prev_path).convert("RGB")
-        out = i2i(
-            prompt=prompt,
-            negative_prompt=negative,
-            image=init_image,
-            guidance_scale=guidance_scale,
-            strength=strength,
-            num_inference_steps=steps,
-            generator=gen
-        ).images[0]
+        prev_path = shots[shot_idx - 1].get("image_path")
+        if prev_path and os.path.exists(prev_path):
+            init_image = Image.open(prev_path).convert("RGB")
+            strength = float(i2i_strength)
+            strength = min(max(strength, 0.50), 0.90)
+            out = i2i(
+                prompt=prompt,
+                negative_prompt=negative,
+                image=init_image,
+                guidance_scale=guidance_scale,
+                strength=strength,
+                num_inference_steps=int(max(2, i2i_steps)),
+                generator=gen
+            ).images[0]
+        else:
+            out = t2i(
+                prompt=prompt,
+                negative_prompt=negative,
+                guidance_scale=guidance_scale,
+                num_inference_steps=int(max(1, t2i_steps)),
+                generator=gen,
+                width=width,
+                height=height
+            ).images[0]
 
     saved_path = _save_keyframe(pid, int(shot["id"]), out)
     return saved_path
 
 # =========================
-# Shots <-> Dataframe utils
+# Shots <-> DataFrame utils
 # =========================
 SHOT_COLUMNS = ["id", "title", "description", "duration", "fps", "steps", "seed", "negative", "image_path"]
 
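Worth noting: diffusers' img2img runs roughly `int(strength * num_inference_steps)` denoising steps, so the clamps above (strength at least 0.50, steps at least 2) guarantee at least one effective step even at turbo-style step counts. Chaining every shot end to end then reduces to a short loop (a sketch; `generate_keyframe_image` and the shot dicts are as defined in this commit):

```python
def chain_all_shots(pid: str, shots: list) -> list:
    # Auto-approve each candidate so shot i+1 conditions on shot i's keyframe.
    for i in range(len(shots)):
        shots[i]["image_path"] = generate_keyframe_image(pid, i, shots)
    return shots
```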
@@ -370,7 +380,7 @@ def df_to_shots(df: pd.DataFrame) -> list:
 # =========================
 with gr.Blocks() as demo:
     gr.Markdown("# 🎬 Storyboard → Keyframes → Videos → Export")
-    gr.Markdown("**Edit storyboard prompts**, then generate keyframes. Each next shot uses the **previous approved image** as reference.")
+    gr.Markdown("Edit storyboard prompts, then generate keyframes. Shots 2+ use the previous approved image for consistency. A single project seed is locked for a cohesive look.")
 
     # State
     project = gr.State(None)
@@ -406,6 +416,8 @@ with gr.Blocks() as demo:
             label="Edit shots below (prompts & params)", wrap=True
         )
         save_edits_btn = gr.Button("Save Edits ✓", variant="primary", interactive=False)
+        with gr.Row():
+            proj_seed_box = gr.Number(label="Project Seed (locked across shots)", precision=0)
         to_keyframes_btn = gr.Button("Start Keyframes →", variant="secondary")
 
     with gr.Tab("Keyframes"):
@@ -415,6 +427,11 @@
         with gr.Row():
             gen_btn = gr.Button("Generate / Regenerate", variant="primary")
             approve_next_btn = gr.Button("Approve & Next →", variant="secondary")
+        # tuning controls
+        with gr.Row():
+            img_strength = gr.Slider(0.40, 0.90, value=0.65, step=0.05, label="Change vs Consistency (img2img strength)")
+            img_steps = gr.Slider(4, 20, value=10, step=1, label="Img2Img Steps")
+            guidance = gr.Slider(0.0, 2.0, value=0.5, step=0.05, label="Guidance Scale")
         with gr.Row():
             prev_img = gr.Image(label="Previous approved image (conditioning)", type="filepath")
             out_img = gr.Image(label="Generated image", type="filepath")
@@ -453,10 +470,7 @@ with gr.Blocks() as demo:
         outputs=[project, shots_df, sb_status, save_edits_btn]
     )
 
-    # Defensive save handler (works even if user clicks too early)
-    def on_save_edits(*args):
-        p = args[0] if len(args) > 0 else None
-        df = args[1] if len(args) > 1 else None
+    def on_save_edits(p, df):
         if p is None:
             raise gr.Error("No project in memory. Click New Project, then generate a storyboard.")
         if df is None:
@@ -470,28 +484,79 @@ with gr.Blocks() as demo:
 
     save_edits_btn.click(on_save_edits, inputs=[project, shots_df], outputs=[project, sb_status])
 
-    def on_start_keyframes(p, df):
+    def on_start_keyframes(p, df, proj_seed_override):
         if p is None: raise gr.Error("No project.")
         shots = df_to_shots(df)
         if not shots: raise gr.Error("Storyboard is empty.")
-        p = dict(p); p["shots"] = shots; p["meta"]["updated"] = now_iso(); save_project(p)
+
+        # lock a single seed for the project:
+        proj_seed = None
+        # override if user supplied:
+        if proj_seed_override not in [None, ""] and str(proj_seed_override).isdigit():
+            proj_seed = int(proj_seed_override)
+
+        # otherwise use existing project meta seed or find one in shots:
+        if proj_seed is None:
+            proj_seed = p.get("meta", {}).get("seed", None)
+        if proj_seed is None:
+            for s in shots:
+                if isinstance(s.get("seed"), int):
+                    proj_seed = int(s["seed"])
+                    break
+        if proj_seed is None:
+            proj_seed = int(torch.randint(0, 2**31 - 1, (1,)).item())
+
+        # apply to all shots missing seed
+        for s in shots:
+            if not isinstance(s.get("seed"), int):
+                s["seed"] = proj_seed
+
+        p = dict(p)
+        p["shots"] = shots
+        p["meta"]["seed"] = proj_seed
+        p["meta"]["updated"] = now_iso()
+        save_project(p)
+
         idx = 0
         prev_path = None
-        info = f"**Shot {shots[idx]['id']} — {shots[idx]['title']}** \nDuration: {shots[idx]['duration']}s @ {shots[idx]['fps']} fps"
-        return p, 0, gr.update(value=info), gr.update(value=shots[idx]["description"]), gr.update(value=prev_path), gr.update(value=None), gr.update(value="Ready to generate shot 1.")
+        info = (
+            f"**Shot {shots[idx]['id']} — {shots[idx]['title']}** \n"
+            f"Duration: {shots[idx]['duration']}s @ {shots[idx]['fps']} fps \n"
+            f"Locked project seed: `{proj_seed}`"
+        )
+        return p, 0, gr.update(value=info), gr.update(value=shots[idx]["description"]), gr.update(value=prev_path), gr.update(value=None), gr.update(value="Ready to generate shot 1."), gr.update(value=proj_seed)
 
-    to_keyframes_btn.click(on_start_keyframes, inputs=[project, shots_df], outputs=[project, current_idx, shot_info_md, prompt_box, prev_img, out_img, kf_status])
+    to_keyframes_btn.click(
+        on_start_keyframes,
+        inputs=[project, shots_df, proj_seed_box],
+        outputs=[project, current_idx, shot_info_md, prompt_box, prev_img, out_img, kf_status, proj_seed_box]
+    )
 
-    def on_generate_img(p, idx, current_prompt):
+    def on_generate_img(p, idx, current_prompt, i2i_strength_val, i2i_steps_val, guidance_val):
         if p is None: raise gr.Error("No project.")
         shots = p["shots"]
         if idx < 0 or idx >= len(shots): raise gr.Error("Invalid shot index.")
-        shots[idx]["description"] = current_prompt  # allow tweaking before generation
+        shots[idx]["description"] = current_prompt  # allow tweaking
         prev_path = shots[idx-1]["image_path"] if idx > 0 else None
-        img_path = generate_keyframe_image(p["meta"]["id"], int(idx), shots)
+
+        img_path = generate_keyframe_image(
+            p["meta"]["id"],
+            int(idx),
+            shots,
+            t2i_steps=6,
+            i2i_steps=int(i2i_steps_val),
+            i2i_strength=float(i2i_strength_val),
+            guidance_scale=float(guidance_val),
+            width=512,
+            height=512
+        )
         return img_path, (prev_path or None), gr.update(value=f"Generated candidate for shot {shots[idx]['id']}.")
 
-    gen_btn.click(on_generate_img, inputs=[project, current_idx, prompt_box], outputs=[out_img, prev_img, kf_status])
+    gen_btn.click(
+        on_generate_img,
+        inputs=[project, current_idx, prompt_box, img_strength, img_steps, guidance],
+        outputs=[out_img, prev_img, kf_status]
+    )
 
     def on_approve_next(p, idx, current_prompt, latest_img_path):
         if p is None: raise gr.Error("No project.")
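The seed-locking block above resolves the seed with a fixed precedence: user override, then the project's stored meta seed, then the first shot that already carries one, then a fresh random value. Factored out as a pure function for clarity (a sketch, not code from the commit):

```python
import random

def resolve_project_seed(override, meta_seed, shots) -> int:
    # 1. explicit user override (digits only)
    if override not in [None, ""] and str(override).isdigit():
        return int(override)
    # 2. seed already stored on the project
    if meta_seed is not None:
        return int(meta_seed)
    # 3. first shot carrying an integer seed
    for s in shots:
        if isinstance(s.get("seed"), int):
            return int(s["seed"])
    # 4. fresh random seed
    return random.randint(0, 2**31 - 1)
```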
@@ -499,6 +564,7 @@ with gr.Blocks() as demo:
         i = int(idx)
         if i < 0 or i >= len(shots): raise gr.Error("Invalid shot index.")
         if not latest_img_path: raise gr.Error("Generate an image first.")
+
         # commit
         shots[i]["description"] = current_prompt
         shots[i]["image_path"] = latest_img_path
@@ -509,7 +575,11 @@ with gr.Blocks() as demo:
         # next
         if i + 1 < len(shots):
             ni = i + 1
-            info = f"**Shot {shots[ni]['id']} — {shots[ni]['title']}** \nDuration: {shots[ni]['duration']}s @ {shots[ni]['fps']} fps"
+            info = (
+                f"**Shot {shots[ni]['id']} — {shots[ni]['title']}** \n"
+                f"Duration: {shots[ni]['duration']}s @ {shots[ni]['fps']} fps \n"
+                f"Locked project seed: `{p['meta'].get('seed')}`"
+            )
             prev_path = shots[ni-1]["image_path"]
             return p, ni, gr.update(value=info), gr.update(value=shots[ni]["description"]), gr.update(value=prev_path), gr.update(value=None), gr.update(value=f"Approved shot {shots[i]['id']}. On to shot {shots[ni]['id']}.")
         else:
@@ -527,13 +597,15 @@ with gr.Blocks() as demo:
 
     def on_load(file_obj):
         p = load_project_file(file_obj)
+        seed_val = p.get("meta", {}).get("seed", None)
         return (
             p,
             gr.update(value=f"Loaded project `{p['meta']['name']}` (id: `{p['meta']['id']}`)"),
             shots_to_df(p.get("shots", [])),
+            gr.update(value=seed_val)
         )
 
-    load_btn.click(on_load, inputs=[load_file], outputs=[project, sb_status, shots_df])
+    load_btn.click(on_load, inputs=[load_file], outputs=[project, sb_status, shots_df, proj_seed_box])
 
 if __name__ == "__main__":
     demo.launch()
 