primerz committed on
Commit
ce8fe2a
·
verified ·
1 Parent(s): 8fa2bfa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -21
app.py CHANGED
@@ -111,7 +111,7 @@ hf_hub_download(
111
  antelope_download = snapshot_download(repo_id="DIAMONIK7777/antelopev2", local_dir="/data/models/antelopev2")
112
  print(antelope_download)
113
  app = FaceAnalysis(name='antelopev2', root='/data', providers=['CPUExecutionProvider'])
114
- app.prepare(ctx_id=0, det_size=(640, 640))
115
 
116
  # prepare models under ./checkpoints
117
  face_adapter = f'/data/checkpoints/ip-adapter.bin'
@@ -144,7 +144,7 @@ pipe = StableDiffusionXLInstantIDImg2ImgPipeline.from_pretrained("SG161222/RealV
144
 
145
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=True)
146
  pipe.load_ip_adapter_instantid(face_adapter)
147
- pipe.set_ip_adapter_scale(0.8)
148
  et = time.time()
149
  elapsed_time = et - st
150
  print('Loading pipeline took: ', elapsed_time, 'seconds')
@@ -181,7 +181,7 @@ def update_selection(selected_state: gr.SelectData, sdxl_loras, face_strength, i
181
 
182
  for lora_list in lora_defaults:
183
  if lora_list["model"] == sdxl_loras[selected_state.index]["repo"]:
184
- face_strength = lora_list.get("face_strength", 0.95)
185
  image_strength = lora_list.get("image_strength", 0.2)
186
  weight = lora_list.get("weight", 0.95)
187
  depth_control_scale = lora_list.get("depth_control_scale", 0.8)
@@ -229,8 +229,7 @@ def merge_incompatible_lora(full_path_lora, lora_scale):
229
  )
230
  del weights_sd
231
  del lora_model
232
-
233
- def resize_image_aspect_ratio(img, max_dim=512):
234
  width, height = img.size
235
  aspect_ratio = width / height
236
 
@@ -247,7 +246,6 @@ def resize_image_aspect_ratio(img, max_dim=512):
247
  return img.resize((new_width, new_height), Image.LANCZOS)
248
 
249
 
250
-
251
  def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, sdxl_loras, custom_lora, progress=gr.Progress(track_tqdm=True)):
252
  print("Custom LoRA:", custom_lora)
253
  custom_lora_path = custom_lora[0] if custom_lora else None
@@ -265,7 +263,7 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
265
  except:
266
  face_detected = False
267
  face_emb = None
268
- face_kps = face_image # Optional face handling, verify intentional
269
 
270
  et = time.time()
271
  print('Cropping and calculating face embeds took:', et - st, 'seconds')
@@ -299,6 +297,8 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
299
  repo_name = sdxl_loras[selected_state_index]["repo"]
300
  full_path_lora = state_dicts[repo_name]["saved_name"]
301
 
 
 
302
  print("Full path LoRA", full_path_lora)
303
 
304
  et = time.time()
@@ -315,6 +315,7 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
315
 
316
  run_lora.zerogpu = True
317
 
 
318
  @spaces.GPU(duration=100)
319
  def generate_image(prompt, negative, face_emb, face_image, face_kps, image_strength, guidance_scale, face_strength, depth_control_scale, repo_name, loaded_state_dict, lora_scale, sdxl_loras, selected_state_index, face_detected, st):
320
  global last_fused, last_lora
@@ -325,7 +326,6 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
325
  control_images = [face_kps, zoe(face_image)] if face_detected else [zoe(face_image)]
326
  control_scales = [face_strength, depth_control_scale] if face_detected else [depth_control_scale]
327
 
328
- # Handle Hugging Face URL-based LoRA
329
  if repo_name.startswith("https://huggingface.co"):
330
  repo_id = repo_name.split("huggingface.co/")[-1]
331
  fs = HfFileSystem()
@@ -335,24 +335,21 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
335
  if not safetensors_files:
336
  raise gr.Error("No .safetensors file found in this Hugging Face repository.")
337
 
338
- weight_file = safetensors_files[0] # Dynamically select the first available .safetensors file
339
  full_path_lora = hf_hub_download(repo_id=repo_id, filename=weight_file, repo_type="model")
340
- loaded_state_dict = load_file(full_path_lora)
341
  else:
342
- # Use the previously loaded state_dict if not using a Hugging Face URL
343
- loaded_state_dict = state_dicts[repo_name]["state_dict"]
344
 
345
- # Manage LoRA weights and textual inversion embeddings
346
  if last_lora != repo_name:
347
  if last_fused:
348
  pipe.unfuse_lora()
349
  pipe.unload_lora_weights()
350
  pipe.unload_textual_inversion()
351
- pipe.load_lora_weights(loaded_state_dict)
 
352
  pipe.fuse_lora(lora_scale)
353
  last_fused = True
354
 
355
- # Handle pivotal tuning (textual inversion embeddings)
356
  is_pivotal = sdxl_loras[selected_state_index]["is_pivotal"]
357
  if is_pivotal:
358
  text_embedding_name = sdxl_loras[selected_state_index]["text_embedding_weights"]
@@ -361,12 +358,10 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
361
  pipe.load_textual_inversion(state_dict_embedding["clip_l" if "clip_l" in state_dict_embedding else "text_encoders_0"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
362
  pipe.load_textual_inversion(state_dict_embedding["clip_g" if "clip_g" in state_dict_embedding else "text_encoders_1"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
363
 
364
- # Prompt embeddings
365
  print("Processing prompt...")
366
  conditioning, pooled = compel(prompt)
367
  negative_conditioning, negative_pooled = compel(negative) if negative else (None, None)
368
 
369
- # Image generation
370
  print("Generating image...")
371
  image = pipe(
372
  prompt_embeds=conditioning,
@@ -379,7 +374,7 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
379
  image=face_image,
380
  strength=1-image_strength,
381
  control_image=control_images,
382
- num_inference_steps=36,
383
  guidance_scale=guidance_scale,
384
  controlnet_conditioning_scale=control_scales,
385
  ).images[0]
@@ -560,9 +555,9 @@ with gr.Blocks(css="custom.css") as demo:
560
  with gr.Accordion("Advanced options", open=False):
561
  negative = gr.Textbox(label="Negative Prompt")
562
  weight = gr.Slider(0, 10, value=0.9, step=0.1, label="LoRA weight")
563
- face_strength = gr.Slider(0, 2, value=0.85, step=0.01, label="Face strength", info="Higher values increase the face likeness but reduce the creative liberty of the models")
564
- image_strength = gr.Slider(0, 1, value=0.15, step=0.01, label="Image strength", info="Higher values increase the similarity with the structure/colors of the original photo")
565
- guidance_scale = gr.Slider(0, 50, value=7, step=0.1, label="Guidance Scale")
566
  depth_control_scale = gr.Slider(0, 1, value=0.8, step=0.01, label="Zoe Depth ControlNet strenght")
567
  prompt_title = gr.Markdown(
568
  value="### Click on a LoRA in the gallery to select it",
 
111
  antelope_download = snapshot_download(repo_id="DIAMONIK7777/antelopev2", local_dir="/data/models/antelopev2")
112
  print(antelope_download)
113
  app = FaceAnalysis(name='antelopev2', root='/data', providers=['CPUExecutionProvider'])
114
+ app.prepare(ctx_id=0, det_size=(768, 768))
115
 
116
  # prepare models under ./checkpoints
117
  face_adapter = f'/data/checkpoints/ip-adapter.bin'
 
144
 
145
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=True)
146
  pipe.load_ip_adapter_instantid(face_adapter)
147
+ pipe.set_ip_adapter_scale(0.9)
148
  et = time.time()
149
  elapsed_time = et - st
150
  print('Loading pipeline took: ', elapsed_time, 'seconds')
 
181
 
182
  for lora_list in lora_defaults:
183
  if lora_list["model"] == sdxl_loras[selected_state.index]["repo"]:
184
+ face_strength = lora_list.get("face_strength", 0.9)
185
  image_strength = lora_list.get("image_strength", 0.2)
186
  weight = lora_list.get("weight", 0.95)
187
  depth_control_scale = lora_list.get("depth_control_scale", 0.8)
 
229
  )
230
  del weights_sd
231
  del lora_model
232
+ def resize_image_aspect_ratio(img, max_dim=1280):
 
233
  width, height = img.size
234
  aspect_ratio = width / height
235
 
 
246
  return img.resize((new_width, new_height), Image.LANCZOS)
247
 
248
 
 
249
  def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, sdxl_loras, custom_lora, progress=gr.Progress(track_tqdm=True)):
250
  print("Custom LoRA:", custom_lora)
251
  custom_lora_path = custom_lora[0] if custom_lora else None
 
263
  except:
264
  face_detected = False
265
  face_emb = None
266
+ face_kps = face_image
267
 
268
  et = time.time()
269
  print('Cropping and calculating face embeds took:', et - st, 'seconds')
 
297
  repo_name = sdxl_loras[selected_state_index]["repo"]
298
  full_path_lora = state_dicts[repo_name]["saved_name"]
299
 
300
+ repo_name = repo_name.rstrip("/").lower()
301
+
302
  print("Full path LoRA", full_path_lora)
303
 
304
  et = time.time()
 
315
 
316
  run_lora.zerogpu = True
317
 
318
+
319
  @spaces.GPU(duration=100)
320
  def generate_image(prompt, negative, face_emb, face_image, face_kps, image_strength, guidance_scale, face_strength, depth_control_scale, repo_name, loaded_state_dict, lora_scale, sdxl_loras, selected_state_index, face_detected, st):
321
  global last_fused, last_lora
 
326
  control_images = [face_kps, zoe(face_image)] if face_detected else [zoe(face_image)]
327
  control_scales = [face_strength, depth_control_scale] if face_detected else [depth_control_scale]
328
 
 
329
  if repo_name.startswith("https://huggingface.co"):
330
  repo_id = repo_name.split("huggingface.co/")[-1]
331
  fs = HfFileSystem()
 
335
  if not safetensors_files:
336
  raise gr.Error("No .safetensors file found in this Hugging Face repository.")
337
 
338
+ weight_file = safetensors_files[0]
339
  full_path_lora = hf_hub_download(repo_id=repo_id, filename=weight_file, repo_type="model")
 
340
  else:
341
+ full_path_lora = loaded_state_dict
 
342
 
 
343
  if last_lora != repo_name:
344
  if last_fused:
345
  pipe.unfuse_lora()
346
  pipe.unload_lora_weights()
347
  pipe.unload_textual_inversion()
348
+
349
+ pipe.load_lora_weights(full_path_lora)
350
  pipe.fuse_lora(lora_scale)
351
  last_fused = True
352
 
 
353
  is_pivotal = sdxl_loras[selected_state_index]["is_pivotal"]
354
  if is_pivotal:
355
  text_embedding_name = sdxl_loras[selected_state_index]["text_embedding_weights"]
 
358
  pipe.load_textual_inversion(state_dict_embedding["clip_l" if "clip_l" in state_dict_embedding else "text_encoders_0"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
359
  pipe.load_textual_inversion(state_dict_embedding["clip_g" if "clip_g" in state_dict_embedding else "text_encoders_1"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
360
 
 
361
  print("Processing prompt...")
362
  conditioning, pooled = compel(prompt)
363
  negative_conditioning, negative_pooled = compel(negative) if negative else (None, None)
364
 
 
365
  print("Generating image...")
366
  image = pipe(
367
  prompt_embeds=conditioning,
 
374
  image=face_image,
375
  strength=1-image_strength,
376
  control_image=control_images,
377
+ num_inference_steps=40,
378
  guidance_scale=guidance_scale,
379
  controlnet_conditioning_scale=control_scales,
380
  ).images[0]
 
555
  with gr.Accordion("Advanced options", open=False):
556
  negative = gr.Textbox(label="Negative Prompt")
557
  weight = gr.Slider(0, 10, value=0.9, step=0.1, label="LoRA weight")
558
+ face_strength = gr.Slider(0, 2, value=0.9, step=0.01, label="Face strength", info="Higher values increase the face likeness but reduce the creative liberty of the models")
559
+ image_strength = gr.Slider(0, 1, value=0.20, step=0.01, label="Image strength", info="Higher values increase the similarity with the structure/colors of the original photo")
560
+ guidance_scale = gr.Slider(0, 50, value=8, step=0.1, label="Guidance Scale")
561
  depth_control_scale = gr.Slider(0, 1, value=0.8, step=0.01, label="Zoe Depth ControlNet strenght")
562
  prompt_title = gr.Markdown(
563
  value="### Click on a LoRA in the gallery to select it",