Fabrice-TIERCELIN committed on
Commit
42e915f
·
verified ·
1 Parent(s): f855171

Global variables

Browse files
Files changed (1) hide show
  1. app.py +8 -0
app.py CHANGED
@@ -53,6 +53,12 @@ pillow_heif.register_heif_opener()
53
  high_vram = False
54
  free_mem_gb = 0
55
 
 
 
 
 
 
 
56
  if torch.cuda.device_count() > 0:
57
  free_mem_gb = get_cuda_free_memory_gb(gpu)
58
  high_vram = free_mem_gb > 60
@@ -312,6 +318,7 @@ def worker(input_image, image_position, prompts, n_prompt, seed, resolution, tot
312
  lora_file,
313
  lora_multiplier,
314
  fp8_optimization):
 
315
  is_last_frame = (image_position == 100)
316
  def encode_prompt(prompt, n_prompt):
317
  llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
@@ -551,6 +558,7 @@ def worker_video(input_video, prompts, n_prompt, seed, batch, resolution, total_
551
  lora_file,
552
  lora_multiplier,
553
  fp8_optimization):
 
554
  def encode_prompt(prompt, n_prompt):
555
  llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
556
 
 
53
  high_vram = False
54
  free_mem_gb = 0
55
 
56
+ transformer = None # load later
57
+ transformer_dtype = torch.bfloat16
58
+ previous_lora_file = None
59
+ previous_lora_multiplier = None
60
+ previous_fp8_optimization = None
61
+
62
  if torch.cuda.device_count() > 0:
63
  free_mem_gb = get_cuda_free_memory_gb(gpu)
64
  high_vram = free_mem_gb > 60
 
318
  lora_file,
319
  lora_multiplier,
320
  fp8_optimization):
321
+ global transformer, previous_lora_file, previous_lora_multiplier, previous_fp8_optimization
322
  is_last_frame = (image_position == 100)
323
  def encode_prompt(prompt, n_prompt):
324
  llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
 
558
  lora_file,
559
  lora_multiplier,
560
  fp8_optimization):
561
+ global transformer, previous_lora_file, previous_lora_multiplier, previous_fp8_optimization
562
  def encode_prompt(prompt, n_prompt):
563
  llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
564