John6666 committed on
Commit
b094393
·
verified ·
1 Parent(s): 18a0647

Upload 4 files

Browse files
Files changed (4) hide show
  1. app.py +158 -149
  2. constants.py +36 -2
  3. modutils.py +1 -1
  4. requirements.txt +1 -1
app.py CHANGED
@@ -8,6 +8,7 @@ from stablepy import (
8
  check_scheduler_compatibility,
9
  TASK_AND_PREPROCESSORS,
10
  FACE_RESTORATION_MODELS,
 
11
  scheduler_names,
12
  )
13
  from constants import (
@@ -398,7 +399,7 @@ class GuiSD:
398
  t2i_adapter_preprocessor,
399
  t2i_adapter_conditioning_scale,
400
  t2i_adapter_conditioning_factor,
401
- xformers_memory_efficient_attention,
402
  freeu,
403
  generator_in_cpu,
404
  adetailer_inpaint_only,
@@ -577,7 +578,7 @@ class GuiSD:
577
  "sampler": sampler,
578
  "schedule_type": schedule_type,
579
  "schedule_prediction_type": schedule_prediction_type,
580
- "xformers_memory_efficient_attention": xformers_memory_efficient_attention,
581
  "gui_active": True,
582
  "loop_generation": loop_generation,
583
  "controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
@@ -592,7 +593,7 @@ class GuiSD:
592
  "leave_progress_bar": leave_progress_bar,
593
  "disable_progress_bar": disable_progress_bar,
594
  "image_previews": image_previews,
595
- "display_images": display_images,
596
  "save_generated_images": save_generated_images,
597
  "filename_pattern": filename_pattern,
598
  "image_storage_location": image_storage_location,
@@ -668,13 +669,16 @@ class GuiSD:
668
  )
669
  if save_generated_images:
670
  info_images += f"<br>{download_links}"
671
-
672
  ## BEGIN MOD
673
  img = save_images(img, metadata)
 
 
674
  ## END MOD
675
-
676
  info_state = "COMPLETE"
677
 
 
 
 
678
  yield info_state, img, info_images
679
 
680
 
@@ -957,142 +961,147 @@ with gr.Blocks(theme=args.theme, elem_id="main", fill_width=True, fill_height=Fa
957
  vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list, value=vae_model_list[0])
958
  prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[1][1])
959
 
960
- with gr.Row(equal_height=False):
961
-
962
- def run_set_params_gui(base_prompt, name_model):
963
- valid_receptors = { # default values
964
- "prompt": gr.update(value=base_prompt),
965
- "neg_prompt": gr.update(value=""),
966
- "Steps": gr.update(value=30),
967
- "width": gr.update(value=1024),
968
- "height": gr.update(value=1024),
969
- "Seed": gr.update(value=-1),
970
- "Sampler": gr.update(value="Euler"),
971
- "CFG scale": gr.update(value=7.), # cfg
972
- "Clip skip": gr.update(value=True),
973
- "Model": gr.update(value=name_model),
974
- "Schedule type": gr.update(value="Automatic"),
975
- "PAG": gr.update(value=.0),
976
- "FreeU": gr.update(value=False),
977
- "Hires upscaler": gr.update(),
978
- "Hires upscale": gr.update(),
979
- "Hires steps": gr.update(),
980
- "Hires denoising strength": gr.update(),
981
- "Hires CFG": gr.update(),
982
- "Hires sampler": gr.update(),
983
- "Hires schedule type": gr.update(),
984
- "Image resolution": gr.update(value=1024),
985
- "Strength": gr.update(),
986
- }
987
-
988
- # Generate up to 7 LoRAs
989
- for i in range(1, 8):
990
- valid_receptors[f"Lora_{i}"] = gr.update()
991
- valid_receptors[f"Lora_scale_{i}"] = gr.update()
992
-
993
- valid_keys = list(valid_receptors.keys())
994
-
995
- parameters = extract_parameters(base_prompt)
996
- # print(parameters)
997
-
998
- if "Sampler" in parameters:
999
- value_sampler = parameters["Sampler"]
1000
- for s_type in SCHEDULE_TYPE_OPTIONS:
1001
- if s_type in value_sampler:
1002
- value_sampler = value_sampler.replace(s_type, "").strip()
1003
- parameters["Sampler"] = value_sampler
1004
- parameters["Schedule type"] = s_type
1005
-
1006
- params_lora = []
1007
- if ">" in parameters["prompt"] and "<" in parameters["prompt"]:
1008
- params_lora = re.findall(r'<lora:[^>]+>', parameters["prompt"])
1009
- if "Loras" in parameters:
1010
- params_lora += re.findall(r'<lora:[^>]+>', parameters["Loras"])
1011
-
1012
- if params_lora:
1013
- parsed_params = []
1014
- for tag_l in params_lora:
1015
- try:
1016
- inner = tag_l.strip("<>") # remove < >
1017
- _, data_l = inner.split(":", 1) # remove the "lora:" part
1018
- parts_l = data_l.split(":")
1019
-
1020
- name_l = parts_l[0]
1021
- weight_l = float(parts_l[1]) if len(parts_l) > 1 else 1.0 # default weight = 1.0
1022
-
1023
- parsed_params.append((name_l, weight_l))
1024
- except Exception as e:
1025
- print(f"Error parsing LoRA tag {tag_l}: {e}")
1026
-
1027
- num_lora = 1
1028
- for parsed_l, parsed_s in parsed_params:
1029
- filtered_loras = [m for m in lora_model_list if parsed_l in m]
1030
- if filtered_loras:
1031
- parameters[f"Lora_{num_lora}"] = filtered_loras[0]
1032
- parameters[f"Lora_scale_{num_lora}"] = parsed_s
1033
- num_lora += 1
1034
-
1035
- # continue = discard new value
1036
- for key, val in parameters.items():
1037
- # print(val)
1038
- if key in valid_keys:
1039
- try:
1040
- if key == "Sampler":
1041
- if val not in scheduler_names:
1042
- continue
1043
- if key in ["Schedule type", "Hires schedule type"]:
1044
- if val not in SCHEDULE_TYPE_OPTIONS:
1045
- continue
1046
- if key == "Hires sampler":
1047
- if val not in POST_PROCESSING_SAMPLER:
1048
- continue
1049
- elif key == "Clip skip":
1050
- if "," in str(val):
1051
- val = val.replace(",", "")
1052
- if int(val) >= 2:
1053
- val = True
1054
- if key == "prompt":
1055
- if ">" in val and "<" in val:
1056
- val = re.sub(r'<[^>]+>', '', val) # Delete html and loras
1057
- print("Removed LoRA written in the prompt")
1058
- if key in ["prompt", "neg_prompt"]:
1059
- val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
1060
- if key in ["Steps", "width", "height", "Seed", "Hires steps", "Image resolution"]:
1061
- val = int(val)
1062
- if key == "FreeU":
1063
- val = True
1064
- if key in ["CFG scale", "PAG", "Hires upscale", "Hires denoising strength", "Hires CFG", "Strength"]:
1065
- val = float(val)
1066
- if key == "Model":
1067
- filtered_models = [m for m in model_list if val in m]
1068
- if filtered_models:
1069
- val = filtered_models[0]
1070
- else:
1071
- val = name_model
1072
- if key == "Hires upscaler":
1073
- if val not in UPSCALER_KEYS:
1074
- continue
1075
- if key == "Seed":
1076
- continue
1077
-
1078
- valid_receptors[key] = gr.update(value=val)
1079
- # print(val, type(val))
1080
- # print(valid_receptors)
1081
- except Exception as e:
1082
- print(str(e))
1083
- return [value for value in valid_receptors.values()]
1084
-
1085
- def run_clear_prompt_gui():
1086
- return gr.update(value=""), gr.update(value="")
1087
- clear_prompt_gui.click(
1088
- run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui], show_api=False
1089
- )
 
 
 
 
 
1090
 
1091
- def run_set_random_seed():
1092
- return -1
1093
- set_random_seed.click(
1094
- run_set_random_seed, [], seed_gui, show_api=False
1095
- )
1096
 
1097
  with gr.Accordion("LoRA", open=False, visible=True) as menu_lora:
1098
  def lora_dropdown(label, visible=True):
@@ -1147,16 +1156,16 @@ with gr.Blocks(theme=args.theme, elem_id="main", fill_width=True, fill_height=Fa
1147
  lora5_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
1148
  lora5_desc_gui = gr.Markdown(value="", visible=False)
1149
  with gr.Column():
1150
- lora6_gui = lora_dropdown("LoRA6", visible=(not IS_ZERO_GPU))
1151
- lora_scale_6_gui = lora_scale_slider("LoRA Scale 6", visible=(not IS_ZERO_GPU))
1152
  with gr.Row():
1153
  with gr.Group():
1154
  lora6_info_gui = lora_textbox("LoRA6 prompts")
1155
  lora6_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
1156
  lora6_desc_gui = gr.Markdown(value="", visible=False)
1157
  with gr.Column():
1158
- lora7_gui = lora_dropdown("LoRA7", visible=(not IS_ZERO_GPU))
1159
- lora_scale_7_gui = lora_scale_slider("LoRA Scale 7", visible=(not IS_ZERO_GPU))
1160
  with gr.Row():
1161
  with gr.Group():
1162
  lora7_info_gui = lora_textbox("LoRA7 prompts")
@@ -1362,6 +1371,8 @@ with gr.Blocks(theme=args.theme, elem_id="main", fill_width=True, fill_height=Fa
1362
  with gr.Accordion("Other settings", open=False, visible=True) as menu_other:
1363
  with gr.Row():
1364
  save_generated_images_gui = gr.Checkbox(value=False, label="Save Generated Images")
 
 
1365
  filename_pattern_gui = gr.Textbox(label="Filename pattern", value="model,seed", placeholder="model,seed,sampler,schedule_type,img_width,img_height,guidance_scale,num_steps,vae,prompt_section,neg_prompt_section", lines=1)
1366
  with gr.Row():
1367
  hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
@@ -1375,12 +1386,10 @@ with gr.Blocks(theme=args.theme, elem_id="main", fill_width=True, fill_height=Fa
1375
  with gr.Accordion("More settings", open=False, visible=False):
1376
  loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
1377
  retain_task_cache_gui = gr.Checkbox(value=True, label="Retain task model in cache")
1378
- display_images_gui = gr.Checkbox(value=False, label="Display Images")
1379
- image_previews_gui = gr.Checkbox(value=True, label="Image Previews")
1380
  retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
1381
  retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Detailfix Model Previous Load")
1382
  retain_hires_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Hires Model Previous Load")
1383
- xformers_memory_efficient_attention_gui = gr.Checkbox(value=False, label="Xformers Memory Efficient Attention")
1384
 
1385
  set_params_gui.click(
1386
  run_set_params_gui, [prompt_gui, model_name_gui], [
@@ -1406,6 +1415,7 @@ with gr.Blocks(theme=args.theme, elem_id="main", fill_width=True, fill_height=Fa
1406
  hires_schedule_type_gui,
1407
  image_resolution_gui,
1408
  strength_gui,
 
1409
  lora1_gui,
1410
  lora_scale_1_gui,
1411
  lora2_gui,
@@ -1421,7 +1431,6 @@ with gr.Blocks(theme=args.theme, elem_id="main", fill_width=True, fill_height=Fa
1421
  lora7_gui,
1422
  lora_scale_7_gui,
1423
  ],
1424
- show_api=False,
1425
  )
1426
 
1427
  with gr.Accordion("Examples and help", open=True, visible=True) as menu_example:
@@ -1741,7 +1750,7 @@ with gr.Blocks(theme=args.theme, elem_id="main", fill_width=True, fill_height=Fa
1741
  t2i_adapter_preprocessor_gui,
1742
  adapter_conditioning_scale_gui,
1743
  adapter_conditioning_factor_gui,
1744
- xformers_memory_efficient_attention_gui,
1745
  free_u_gui,
1746
  generator_in_cpu_gui,
1747
  adetailer_inpaint_only_gui,
 
8
  check_scheduler_compatibility,
9
  TASK_AND_PREPROCESSORS,
10
  FACE_RESTORATION_MODELS,
11
+ PROMPT_WEIGHT_OPTIONS_PRIORITY,
12
  scheduler_names,
13
  )
14
  from constants import (
 
399
  t2i_adapter_preprocessor,
400
  t2i_adapter_conditioning_scale,
401
  t2i_adapter_conditioning_factor,
402
+ enable_live_preview,
403
  freeu,
404
  generator_in_cpu,
405
  adetailer_inpaint_only,
 
578
  "sampler": sampler,
579
  "schedule_type": schedule_type,
580
  "schedule_prediction_type": schedule_prediction_type,
581
+ "xformers_memory_efficient_attention": False,
582
  "gui_active": True,
583
  "loop_generation": loop_generation,
584
  "controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
 
593
  "leave_progress_bar": leave_progress_bar,
594
  "disable_progress_bar": disable_progress_bar,
595
  "image_previews": image_previews,
596
+ "display_images": False,
597
  "save_generated_images": save_generated_images,
598
  "filename_pattern": filename_pattern,
599
  "image_storage_location": image_storage_location,
 
669
  )
670
  if save_generated_images:
671
  info_images += f"<br>{download_links}"
 
672
  ## BEGIN MOD
673
  img = save_images(img, metadata)
674
+ if not display_images:
675
+ img = gr.update(value=img) if img else gr.update()
676
  ## END MOD
 
677
  info_state = "COMPLETE"
678
 
679
+ elif not enable_live_preview:
680
+ img = gr.update()
681
+
682
  yield info_state, img, info_images
683
 
684
 
 
961
  vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list, value=vae_model_list[0])
962
  prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[1][1])
963
 
964
+ def run_set_params_gui(base_prompt, name_model):
965
+ valid_receptors = { # default values
966
+ "prompt": gr.update(value=base_prompt),
967
+ "neg_prompt": gr.update(value=""),
968
+ "Steps": gr.update(value=30),
969
+ "width": gr.update(value=1024),
970
+ "height": gr.update(value=1024),
971
+ "Seed": gr.update(value=-1),
972
+ "Sampler": gr.update(value="Euler"),
973
+ "CFG scale": gr.update(value=7.), # cfg
974
+ "Clip skip": gr.update(value=True),
975
+ "Model": gr.update(value=name_model),
976
+ "Schedule type": gr.update(value="Automatic"),
977
+ "PAG": gr.update(value=.0),
978
+ "FreeU": gr.update(value=False),
979
+ "Hires upscaler": gr.update(),
980
+ "Hires upscale": gr.update(),
981
+ "Hires steps": gr.update(),
982
+ "Hires denoising strength": gr.update(),
983
+ "Hires CFG": gr.update(),
984
+ "Hires sampler": gr.update(),
985
+ "Hires schedule type": gr.update(),
986
+ "Image resolution": gr.update(value=1024),
987
+ "Strength": gr.update(),
988
+ "Prompt emphasis": gr.update(),
989
+ }
990
+
991
+ # Generate up to 7 LoRAs
992
+ for i in range(1, 8):
993
+ valid_receptors[f"Lora_{i}"] = gr.update()
994
+ valid_receptors[f"Lora_scale_{i}"] = gr.update()
995
+
996
+ valid_keys = list(valid_receptors.keys())
997
+
998
+ parameters = extract_parameters(base_prompt)
999
+ # print(parameters)
1000
+
1001
+ if "Sampler" in parameters:
1002
+ value_sampler = parameters["Sampler"]
1003
+ for s_type in SCHEDULE_TYPE_OPTIONS:
1004
+ if s_type in value_sampler:
1005
+ value_sampler = value_sampler.replace(s_type, "").strip()
1006
+ parameters["Sampler"] = value_sampler
1007
+ parameters["Schedule type"] = s_type
1008
+
1009
+ params_lora = []
1010
+ if ">" in parameters["prompt"] and "<" in parameters["prompt"]:
1011
+ params_lora = re.findall(r'<lora:[^>]+>', parameters["prompt"])
1012
+ if "Loras" in parameters:
1013
+ params_lora += re.findall(r'<lora:[^>]+>', parameters["Loras"])
1014
+
1015
+ if params_lora:
1016
+ parsed_params = []
1017
+ for tag_l in params_lora:
1018
+ try:
1019
+ inner = tag_l.strip("<>") # remove < >
1020
+ _, data_l = inner.split(":", 1) # remove the "lora:" part
1021
+ parts_l = data_l.split(":")
1022
+
1023
+ name_l = parts_l[0]
1024
+ weight_l = float(parts_l[1]) if len(parts_l) > 1 else 1.0 # default weight = 1.0
1025
+
1026
+ parsed_params.append((name_l, weight_l))
1027
+ except Exception as e:
1028
+ print(f"Error parsing LoRA tag {tag_l}: {e}")
1029
+
1030
+ new_lora_model_list = get_model_list(DIRECTORY_LORAS)
1031
+ new_lora_model_list.insert(0, "None")
1032
+
1033
+ num_lora = 1
1034
+ for parsed_l, parsed_s in parsed_params:
1035
+ filtered_loras = [m for m in new_lora_model_list if parsed_l in m]
1036
+ if filtered_loras:
1037
+ parameters[f"Lora_{num_lora}"] = filtered_loras[0]
1038
+ parameters[f"Lora_scale_{num_lora}"] = parsed_s
1039
+ num_lora += 1
1040
+
1041
+ # continue = discard new value
1042
+ for key, val in parameters.items():
1043
+ # print(val)
1044
+ if key in valid_keys:
1045
+ try:
1046
+ if key == "Sampler":
1047
+ if val not in scheduler_names:
1048
+ continue
1049
+ if key in ["Schedule type", "Hires schedule type"]:
1050
+ if val not in SCHEDULE_TYPE_OPTIONS:
1051
+ continue
1052
+ if key == "Hires sampler":
1053
+ if val not in POST_PROCESSING_SAMPLER:
1054
+ continue
1055
+ if key == "Prompt emphasis":
1056
+ if val not in PROMPT_WEIGHT_OPTIONS_PRIORITY:
1057
+ continue
1058
+ elif key == "Clip skip":
1059
+ if "," in str(val):
1060
+ val = val.replace(",", "")
1061
+ if int(val) >= 2:
1062
+ val = True
1063
+ if key == "prompt":
1064
+ if ">" in val and "<" in val:
1065
+ val = re.sub(r'<[^>]+>', '', val) # Delete html and loras
1066
+ print("Removed LoRA written in the prompt")
1067
+ if key in ["prompt", "neg_prompt"]:
1068
+ val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
1069
+ if key in ["Steps", "width", "height", "Seed", "Hires steps", "Image resolution"]:
1070
+ val = int(val)
1071
+ if key == "FreeU":
1072
+ val = True
1073
+ if key in ["CFG scale", "PAG", "Hires upscale", "Hires denoising strength", "Hires CFG", "Strength"]:
1074
+ val = float(val)
1075
+ if key == "Model":
1076
+ filtered_models = [m for m in model_list if val in m]
1077
+ if filtered_models:
1078
+ val = filtered_models[0]
1079
+ else:
1080
+ val = name_model
1081
+ if key == "Hires upscaler":
1082
+ if val not in UPSCALER_KEYS:
1083
+ continue
1084
+ if key == "Seed":
1085
+ continue
1086
+
1087
+ valid_receptors[key] = gr.update(value=val)
1088
+ # print(val, type(val))
1089
+ # print(valid_receptors)
1090
+ except Exception as e:
1091
+ print(str(e))
1092
+ return [value for value in valid_receptors.values()]
1093
+
1094
+ def run_clear_prompt_gui():
1095
+ return gr.update(value=""), gr.update(value="")
1096
+ clear_prompt_gui.click(
1097
+ run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
1098
+ )
1099
 
1100
+ def run_set_random_seed():
1101
+ return -1
1102
+ set_random_seed.click(
1103
+ run_set_random_seed, [], seed_gui
1104
+ )
1105
 
1106
  with gr.Accordion("LoRA", open=False, visible=True) as menu_lora:
1107
  def lora_dropdown(label, visible=True):
 
1156
  lora5_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
1157
  lora5_desc_gui = gr.Markdown(value="", visible=False)
1158
  with gr.Column():
1159
+ lora6_gui = lora_dropdown("LoRA6")
1160
+ lora_scale_6_gui = lora_scale_slider("LoRA Scale 6")
1161
  with gr.Row():
1162
  with gr.Group():
1163
  lora6_info_gui = lora_textbox("LoRA6 prompts")
1164
  lora6_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
1165
  lora6_desc_gui = gr.Markdown(value="", visible=False)
1166
  with gr.Column():
1167
+ lora7_gui = lora_dropdown("LoRA7")
1168
+ lora_scale_7_gui = lora_scale_slider("LoRA Scale 7")
1169
  with gr.Row():
1170
  with gr.Group():
1171
  lora7_info_gui = lora_textbox("LoRA7 prompts")
 
1371
  with gr.Accordion("Other settings", open=False, visible=True) as menu_other:
1372
  with gr.Row():
1373
  save_generated_images_gui = gr.Checkbox(value=False, label="Save Generated Images")
1374
+ enable_live_preview_gui = gr.Checkbox(value=True, label="Enable live previews")
1375
+ display_images_gui = gr.Checkbox(value=True, label="Show final results")
1376
  filename_pattern_gui = gr.Textbox(label="Filename pattern", value="model,seed", placeholder="model,seed,sampler,schedule_type,img_width,img_height,guidance_scale,num_steps,vae,prompt_section,neg_prompt_section", lines=1)
1377
  with gr.Row():
1378
  hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
 
1386
  with gr.Accordion("More settings", open=False, visible=False):
1387
  loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
1388
  retain_task_cache_gui = gr.Checkbox(value=True, label="Retain task model in cache")
1389
+ image_previews_gui = gr.Checkbox(value=True, label="Image Previews (alt)")
 
1390
  retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
1391
  retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Detailfix Model Previous Load")
1392
  retain_hires_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Hires Model Previous Load")
 
1393
 
1394
  set_params_gui.click(
1395
  run_set_params_gui, [prompt_gui, model_name_gui], [
 
1415
  hires_schedule_type_gui,
1416
  image_resolution_gui,
1417
  strength_gui,
1418
+ prompt_syntax_gui,
1419
  lora1_gui,
1420
  lora_scale_1_gui,
1421
  lora2_gui,
 
1431
  lora7_gui,
1432
  lora_scale_7_gui,
1433
  ],
 
1434
  )
1435
 
1436
  with gr.Accordion("Examples and help", open=True, visible=True) as menu_example:
 
1750
  t2i_adapter_preprocessor_gui,
1751
  adapter_conditioning_scale_gui,
1752
  adapter_conditioning_factor_gui,
1753
+ enable_live_preview_gui,
1754
  free_u_gui,
1755
  generator_in_cpu_gui,
1756
  adetailer_inpaint_only_gui,
constants.py CHANGED
@@ -16,12 +16,15 @@ IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
16
  DOWNLOAD_MODEL = "https://huggingface.co/zuv0/test/resolve/main/milkyWonderland_v40.safetensors"
17
 
18
  # - **Download VAEs**
19
- DOWNLOAD_VAE = "https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true"
20
 
21
  # - **Download LoRAs**
22
  DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book_-_LineArt.safetensors, https://civitai.com/api/download/models/135867, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-8steps-CFG-lora.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-8steps-CFG-lora.safetensors?download=true"
23
 
24
  LOAD_DIFFUSERS_FORMAT_MODEL = [
 
 
 
25
  'stabilityai/stable-diffusion-xl-base-1.0',
26
  'Laxhar/noobai-XL-1.1',
27
  'Laxhar/noobai-XL-Vpred-1.0',
@@ -51,8 +54,10 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
51
  'John6666/ntr-mix-illustrious-xl-noob-xl-xi-sdxl',
52
  'John6666/ntr-mix-illustrious-xl-noob-xl-xii-sdxl',
53
  'John6666/ntr-mix-illustrious-xl-noob-xl-xiii-sdxl',
 
54
  'John6666/mistoon-anime-v10illustrious-sdxl',
55
  'John6666/hassaku-xl-illustrious-v22-sdxl',
 
56
  'John6666/haruki-mix-illustrious-v10-sdxl',
57
  'John6666/noobreal-v10-sdxl',
58
  'John6666/complicated-noobai-merge-vprediction-sdxl',
@@ -64,6 +69,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
64
  'Laxhar/noobai-XL-Vpred-0.6',
65
  'John6666/cat-tower-noobai-xl-checkpoint-v14vpred-sdxl',
66
  'John6666/cat-tower-noobai-xl-checkpoint-v15vpred-sdxl',
 
67
  'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
68
  'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
69
  'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
@@ -78,19 +84,31 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
78
  'John6666/one-obsession-14-24d-sdxl',
79
  'John6666/one-obsession-15-noobai-sdxl',
80
  'John6666/one-obsession-v16-noobai-sdxl',
 
 
 
 
 
81
  'John6666/prefect-illustrious-xl-v3-sdxl',
 
 
82
  'John6666/wai-nsfw-illustrious-v70-sdxl',
83
  'John6666/wai-nsfw-illustrious-sdxl-v140-sdxl',
 
84
  'John6666/illustrious-pony-mix-v3-sdxl',
85
  'John6666/nova-anime-xl-il-v90-sdxl',
86
  'John6666/nova-anime-xl-il-v110-sdxl',
 
87
  'John6666/nova-orange-xl-re-v10-sdxl',
88
  'John6666/nova-orange-xl-v110-sdxl',
89
  'John6666/nova-orange-xl-re-v20-sdxl',
90
  'John6666/nova-unreal-xl-v60-sdxl',
91
  'John6666/nova-unreal-xl-v70-sdxl',
92
  'John6666/nova-unreal-xl-v80-sdxl',
 
93
  'John6666/nova-cartoon-xl-v40-sdxl',
 
 
94
  'John6666/silvermoon-mix03-illustrious-v10-sdxl',
95
  'eienmojiki/Anything-XL',
96
  'eienmojiki/Starry-XL-v5.2',
@@ -151,6 +169,22 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
151
  'yodayo-ai/kivotos-xl-2.0',
152
  'yodayo-ai/holodayo-xl-2.1',
153
  'yodayo-ai/clandestine-xl-1.0',
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154
  'https://huggingface.co/chemwolf/Karmix-XL-v0/resolve/main/Karmix-XL-v0.safetensors?download=true',
155
  'https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16',
156
  'https://civitai.com/models/30240?modelVersionId=125771',
@@ -603,4 +637,4 @@ RESOURCES = (
603
  - Try the image generator in Colab’s free tier, which provides free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
604
  - `DiffuseCraft` in Colab:[link](https://github.com/R3gm/DiffuseCraft?tab=readme-ov-file#diffusecraft).
605
  """
606
- )
 
16
  DOWNLOAD_MODEL = "https://huggingface.co/zuv0/test/resolve/main/milkyWonderland_v40.safetensors"
17
 
18
  # - **Download VAEs**
19
+ DOWNLOAD_VAE = "https://huggingface.co/Anzhc/Anzhcs-VAEs/resolve/main/SDXL%20Anime%20VAE%20Dec-only%20B3.safetensors, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true"
20
 
21
  # - **Download LoRAs**
22
  DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book_-_LineArt.safetensors, https://civitai.com/api/download/models/135867, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-8steps-CFG-lora.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-8steps-CFG-lora.safetensors?download=true"
23
 
24
  LOAD_DIFFUSERS_FORMAT_MODEL = [
25
+ 'TestOrganizationPleaseIgnore/potato_quality_anime_plzwork_sdxl',
26
+ 'TestOrganizationPleaseIgnore/rinAnim8drawIllustriousXL_v20_sdxl',
27
+ 'TestOrganizationPleaseIgnore/perfectrsbmixIllustrious_definitiveiota_sdxl',
28
  'stabilityai/stable-diffusion-xl-base-1.0',
29
  'Laxhar/noobai-XL-1.1',
30
  'Laxhar/noobai-XL-Vpred-1.0',
 
54
  'John6666/ntr-mix-illustrious-xl-noob-xl-xi-sdxl',
55
  'John6666/ntr-mix-illustrious-xl-noob-xl-xii-sdxl',
56
  'John6666/ntr-mix-illustrious-xl-noob-xl-xiii-sdxl',
57
+ 'martineux/nova-unreal10',
58
  'John6666/mistoon-anime-v10illustrious-sdxl',
59
  'John6666/hassaku-xl-illustrious-v22-sdxl',
60
+ 'John6666/hassaku-xl-illustrious-v31-sdxl',
61
  'John6666/haruki-mix-illustrious-v10-sdxl',
62
  'John6666/noobreal-v10-sdxl',
63
  'John6666/complicated-noobai-merge-vprediction-sdxl',
 
69
  'Laxhar/noobai-XL-Vpred-0.6',
70
  'John6666/cat-tower-noobai-xl-checkpoint-v14vpred-sdxl',
71
  'John6666/cat-tower-noobai-xl-checkpoint-v15vpred-sdxl',
72
+ 'John6666/cat-tower-noobai-xl-checkpoint-v20-vpred-sdxl',
73
  'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
74
  'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
75
  'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
 
84
  'John6666/one-obsession-14-24d-sdxl',
85
  'John6666/one-obsession-15-noobai-sdxl',
86
  'John6666/one-obsession-v16-noobai-sdxl',
87
+ 'John6666/one-obsession-17-red-sdxl',
88
+ 'martineux/oneobs18',
89
+ 'martineux/oneobsession19',
90
+ 'John6666/cat-tower-noobai-xl-checkpoint-v14-epsilon-pred-sdxl',
91
+ 'martineux/cattower-chenkin-xl',
92
  'John6666/prefect-illustrious-xl-v3-sdxl',
93
+ 'martineux/perfect4',
94
+ 'martineux/prefectIllustriousXL_v5',
95
  'John6666/wai-nsfw-illustrious-v70-sdxl',
96
  'John6666/wai-nsfw-illustrious-sdxl-v140-sdxl',
97
+ 'martineux/waiIllustriousSDXL_v160',
98
  'John6666/illustrious-pony-mix-v3-sdxl',
99
  'John6666/nova-anime-xl-il-v90-sdxl',
100
  'John6666/nova-anime-xl-il-v110-sdxl',
101
+ 'frankjoshua/novaAnimeXL_ilV140',
102
  'John6666/nova-orange-xl-re-v10-sdxl',
103
  'John6666/nova-orange-xl-v110-sdxl',
104
  'John6666/nova-orange-xl-re-v20-sdxl',
105
  'John6666/nova-unreal-xl-v60-sdxl',
106
  'John6666/nova-unreal-xl-v70-sdxl',
107
  'John6666/nova-unreal-xl-v80-sdxl',
108
+ 'martineux/nova-unreal10',
109
  'John6666/nova-cartoon-xl-v40-sdxl',
110
+ 'martineux/novacartoon6',
111
+ 'martineux/novareal8',
112
  'John6666/silvermoon-mix03-illustrious-v10-sdxl',
113
  'eienmojiki/Anything-XL',
114
  'eienmojiki/Starry-XL-v5.2',
 
169
  'yodayo-ai/kivotos-xl-2.0',
170
  'yodayo-ai/holodayo-xl-2.1',
171
  'yodayo-ai/clandestine-xl-1.0',
172
+ 'Raelina/Raehoshi-illust-XL-8',
173
+ 'johnkillington/chenkinxmilfynoobai_v20-MLX',
174
+ 'martineux/unholydesire5-xl',
175
+ 'abacaxthebrave/Unholy_Desire_Mix_ILXL',
176
+ 'martineux/diving5',
177
+ 'martineux/diving7',
178
+ 'martineux/mergestein-animuplus-xl',
179
+ 'martineux/mergestein-uncannyr2-xl',
180
+ 'martineux/steincustom_V12',
181
+ 'martineux/miaomiao-realskin1p25-xl',
182
+ 'martineux/miaov18',
183
+ 'John6666/garage-mix-noob-vpred-eps-v10-vpred-sdxl',
184
+ 'TestOrganizationPleaseIgnore/perfectrsbmixIllustrious_definitivelambda_sdxl',
185
+ 'TestOrganizationPleaseIgnore/rinFlanimeIllustrious_v27_sdxl',
186
+ 'TestOrganizationPleaseIgnore/rinAnimepopcute_v30_sdxl',
187
+ 'TestOrganizationPleaseIgnore/potato_quality_anime_zzz_sdxl',
188
  'https://huggingface.co/chemwolf/Karmix-XL-v0/resolve/main/Karmix-XL-v0.safetensors?download=true',
189
  'https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16',
190
  'https://civitai.com/models/30240?modelVersionId=125771',
 
637
  - Try the image generator in Colab’s free tier, which provides free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
638
  - `DiffuseCraft` in Colab:[link](https://github.com/R3gm/DiffuseCraft?tab=readme-ov-file#diffusecraft).
639
  """
640
+ )
modutils.py CHANGED
@@ -1753,7 +1753,7 @@ EXAMPLES_GUI = [
1753
  "Euler",
1754
  1024,
1755
  1024,
1756
- "Raelina/Raehoshi-illust-XL-6",
1757
  ],
1758
  [
1759
  "yoshida yuuko, machikado mazoku, 1girl, solo, demon horns,horns, school uniform, long hair, open mouth, skirt, demon girl, ahoge, shiny, shiny hair, anime artwork",
 
1753
  "Euler",
1754
  1024,
1755
  1024,
1756
+ "Raelina/Raehoshi-illust-XL-8",
1757
  ],
1758
  [
1759
  "yoshida yuuko, machikado mazoku, 1girl, solo, demon horns,horns, school uniform, long hair, open mouth, skirt, demon girl, ahoge, shiny, shiny hair, anime artwork",
requirements.txt CHANGED
@@ -1,6 +1,6 @@
1
  stablepy==0.6.5
2
  diffusers
3
- transformers<=4.48.3
4
  accelerate
5
  huggingface_hub
6
  hf_transfer
 
1
  stablepy==0.6.5
2
  diffusers
3
+ transformers
4
  accelerate
5
  huggingface_hub
6
  hf_transfer