# at_test / combined_script.py
# (Hugging Face upload metadata: corsair4090, "Upload 10 files", commit e250d47 verified)
import os
import toml
def process_flux_checkpoint(project_root):
    """Generate a FluxCheckpoint training TOML for each folder under output/.

    For every sub-folder of ``<project_root>/output`` this ensures the
    model/log/img subdirectories exist, resolves the model and prompt paths
    of the base template to absolute (forward-slash) form, points the
    output/logging/training-data paths at the folder, warns about missing
    paths, and writes ``BatchConfig/Flux/<folder>.toml``.

    Args:
        project_root: Absolute path of the project's root directory.

    Returns:
        True if the base config parsed and every TOML file was written
        (or there were no folders to process); False otherwise.
    """
    print("[INFO] Starting FluxCheckpoint configuration...")
    # ------------------------------
    # Initial Setup for FluxCheckpoint
    # ------------------------------
    output_dir = os.path.join(project_root, "output")
    batch_config_dir = os.path.join(project_root, "BatchConfig", "Flux")
    os.makedirs(batch_config_dir, exist_ok=True)

    # Base configuration template; relative paths are resolved per run below.
    base_config_flux = """
adaptive_noise_scale = 0
ae = "./models/trainX/ae.safetensors"
blocks_to_swap = 9
bucket_no_upscale = true
bucket_reso_steps = 64
cache_latents = true
cache_latents_to_disk = true
cache_text_encoder_outputs = true
cache_text_encoder_outputs_to_disk = true
caption_dropout_every_n_epochs = 0
caption_dropout_rate = 0
caption_extension = ".txt"
clip_l = "./models/trainX/clip_l.safetensors"
discrete_flow_shift = 3.1582
double_blocks_to_swap = 0
dynamo_backend = "no"
max_train_epochs = 1
full_bf16 = true
fused_backward_pass = true
gradient_accumulation_steps = 1
gradient_checkpointing = true
guidance_scale = 1
huber_c = 0.1
huber_schedule = "snr"
keep_tokens = 0
learning_rate = 4e-6
learning_rate_te = 0
logging_dir = "Z"
loss_type = "l2"
lr_scheduler = "constant"
lr_scheduler_args = []
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1
lr_warmup_steps = 0
max_bucket_reso = 2048
max_data_loader_n_workers = 0
max_timestep = 1000
max_token_length = 75
mem_eff_save = true
min_bucket_reso = 256
mixed_precision = "bf16"
model_prediction_type = "raw"
multires_noise_discount = 0.3
multires_noise_iterations = 0
noise_offset = 0
noise_offset_type = "Original"
optimizer_args = [ "scale_parameter=False", "relative_step=False", "warmup_init=False", "weight_decay=0.01",]
optimizer_type = "Adafactor"
output_dir = "Z"
output_name = "m1u"
persistent_data_loader_workers = 0
pretrained_model_name_or_path = "./models/trainX/flux1-dev-fp8.safetensors"
prior_loss_weight = 1
resolution = "1024,1024"
sample_every_n_steps = 250
sample_prompts = "./sample_prompts.txt"
sample_sampler = "euler_a"
save_model_as = "safetensors"
save_precision = "fp16"
seed = 1
single_blocks_to_swap = 0
t5xxl = "./models/trainX/t5xxl_fp8_e4m3fn.safetensors"
t5xxl_max_token_length = 512
timestep_sampling = "sigmoid"
train_batch_size = 1
train_blocks = "all"
train_data_dir = "Z"
vae_batch_size = 4
wandb_run_name = "m1u"
xformers = true
"""
    try:
        config_dict_base_flux = toml.loads(base_config_flux)
    except toml.TomlDecodeError as e:
        print(f"[ERROR] Failed to parse FluxCheckpoint base configuration: {e}")
        return False

    # Get list of folders in output directory.
    try:
        folders = [f for f in os.listdir(output_dir)
                   if os.path.isdir(os.path.join(output_dir, f))]
    except FileNotFoundError:
        print(f"[WARNING] The directory '{output_dir}' does not exist.")
        folders = []

    # BUGFIX: previously the function returned True even when writing a
    # TOML file failed; track write failures so callers see them.
    all_written = True
    if not folders:
        print(f"[INFO] No folders found in '{output_dir}'.")
    else:
        for folder in folders:
            # Create necessary subdirectories.
            for subdir in ("model", "log", "img"):
                os.makedirs(os.path.join(output_dir, folder, subdir), exist_ok=True)

            # Work on a copy so the parsed base template stays pristine.
            config_dict = config_dict_base_flux.copy()

            # Keys whose template values are project-relative paths.
            path_keys = [
                "ae",
                "clip_l",
                "pretrained_model_name_or_path",
                "t5xxl",
                "sample_prompts",
            ]
            # Convert relative paths to absolute paths with forward slashes.
            for key in path_keys:
                if key in config_dict:
                    absolute_path = os.path.abspath(
                        os.path.join(project_root, config_dict[key]))
                    config_dict[key] = absolute_path.replace('\\', '/')

            # Point output/log/training-data locations at this folder.
            config_dict['output_dir'] = os.path.abspath(
                os.path.join(output_dir, folder, "model")).replace('\\', '/')
            config_dict['output_name'] = folder
            config_dict['logging_dir'] = os.path.abspath(
                os.path.join(output_dir, folder, "log")).replace('\\', '/')
            config_dict['train_data_dir'] = os.path.abspath(
                os.path.join(output_dir, folder, "img")).replace('\\', '/')

            # Warn (but do not abort) if any referenced path is absent.
            required_paths = [
                config_dict.get("ae"),
                config_dict.get("clip_l"),
                config_dict.get("pretrained_model_name_or_path"),
                config_dict.get("t5xxl"),
                config_dict.get("sample_prompts"),
                config_dict.get("output_dir"),
                config_dict.get("logging_dir"),
                config_dict.get("train_data_dir"),
            ]
            missing_paths = [p for p in required_paths if p and not os.path.exists(p)]
            if missing_paths:
                print(f"[WARNING] Some paths do not exist for folder '{folder}':")
                for path in missing_paths:
                    print(f" - {path}")
                print("Please ensure that all necessary files and directories are present.\n")

            # Write the configuration in TOML format.
            toml_path = os.path.join(batch_config_dir, f"{folder}.toml")
            try:
                with open(toml_path, 'w', encoding='utf-8') as toml_file:
                    toml.dump(config_dict, toml_file)
                print(f"[SUCCESS] FluxCheckpoint configuration file created: {toml_path}")
            except Exception as e:
                print(f"[ERROR] Failed to write FluxCheckpoint TOML file '{toml_path}': {e}")
                all_written = False

    print("[INFO] FluxCheckpoint configuration completed.\n")
    return all_written
def process_flux_lora(project_root):
    """Generate a FluxLora training TOML for each folder under output/.

    For every sub-folder of ``<project_root>/output`` this ensures the
    model/log/img subdirectories exist, resolves the model and prompt paths
    of the base template to absolute (forward-slash) form, replaces the "X"
    placeholders with folder-specific locations, warns about missing paths,
    and writes ``BatchConfig/FluxLORA/<folder>.toml``.

    Args:
        project_root: Absolute path of the project's root directory.

    Returns:
        True if the base config parsed and every TOML file was written
        (or there were no folders to process); False otherwise.
    """
    print("[INFO] Starting FluxLora configuration...")
    # ------------------------------
    # Initial Setup for FluxLora
    # ------------------------------
    output_dir = os.path.join(project_root, "output")
    batch_config_dir = os.path.join(project_root, "BatchConfig", "FluxLORA")
    os.makedirs(batch_config_dir, exist_ok=True)

    # Base configuration template; "X" marks folder-specific placeholders.
    base_config_lora = """
ae = "./models/trainX/ae.safetensors"
bucket_no_upscale = true
bucket_reso_steps = 64
cache_latents = true
cache_latents_to_disk = true
cache_text_encoder_outputs = true
cache_text_encoder_outputs_to_disk = true
caption_extension = ".txt"
clip_l = "./models/trainX/clip_l.safetensors"
discrete_flow_shift = 3.1582
dynamo_backend = "no"
max_train_epochs = 1
fp8_base = true
full_bf16 = true
gradient_accumulation_steps = 1
gradient_checkpointing = true
guidance_scale = 1.0
highvram = true
huber_c = 0.1
huber_schedule = "snr"
logging_dir = "X"
loss_type = "l2"
lr_scheduler = "constant"
lr_scheduler_args = []
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1
max_bucket_reso = 2048
max_data_loader_n_workers = 0
max_timestep = 1000
mem_eff_save = true
min_bucket_reso = 256
mixed_precision = "bf16"
model_prediction_type = "raw"
network_alpha = 128
network_args = ["train_double_block_indices=all", "train_single_block_indices=all"]
network_dim = 128
network_module = "networks.lora_flux"
noise_offset_type = "Original"
optimizer_args = ["scale_parameter=False", "relative_step=False", "warmup_init=False", "weight_decay=0.01"]
optimizer_type = "Adafactor"
output_dir = "X"
output_name = "X"
pretrained_model_name_or_path = "./models/trainX/flux1-dev-fp8.safetensors"
prior_loss_weight = 1
resolution = "1024,1280"
sample_every_n_steps = 250
sample_prompts = "./sample_prompts.txt"
sample_sampler = "euler_a"
save_model_as = "safetensors"
save_precision = "float"
seed = 1
t5xxl = "./models/trainX/t5xxl_fp8_e4m3fn.safetensors"
t5xxl_max_token_length = 512
text_encoder_lr = [5e-5, 5e-5]
timestep_sampling = "sigmoid"
train_batch_size = 1
train_data_dir = "X"
unet_lr = 5e-5
vae_batch_size = 4
wandb_run_name = "X"
xformers = true
"""
    try:
        config_dict_base_lora = toml.loads(base_config_lora)
    except toml.TomlDecodeError as e:
        print(f"[ERROR] Failed to parse FluxLora base configuration: {e}")
        return False

    # Get list of folders in output directory.
    try:
        folders = [f for f in os.listdir(output_dir)
                   if os.path.isdir(os.path.join(output_dir, f))]
    except FileNotFoundError:
        print(f"[WARNING] The directory '{output_dir}' does not exist.")
        folders = []

    # BUGFIX: previously the function returned True even when writing a
    # TOML file failed; track write failures so callers see them.
    all_written = True
    if not folders:
        print(f"[INFO] No folders found in '{output_dir}'.")
    else:
        for folder in folders:
            # Create necessary subdirectories.
            for subdir in ("model", "log", "img"):
                os.makedirs(os.path.join(output_dir, folder, subdir), exist_ok=True)

            # Work on a copy so the parsed base template stays pristine.
            config_dict = config_dict_base_lora.copy()

            # Resolve project-relative model/prompt paths to absolute
            # forward-slash form (these are never the "X" placeholder).
            for key in ("ae", "clip_l", "pretrained_model_name_or_path",
                        "t5xxl", "sample_prompts"):
                if key in config_dict and config_dict[key] != "X":
                    absolute_path = os.path.abspath(
                        os.path.join(project_root, config_dict[key]))
                    config_dict[key] = absolute_path.replace('\\', '/')

            # Replace the "X" placeholders with folder-specific locations.
            config_dict['output_dir'] = os.path.abspath(
                os.path.join(output_dir, folder, "model")).replace('\\', '/')
            config_dict['logging_dir'] = os.path.abspath(
                os.path.join(output_dir, folder, "log")).replace('\\', '/')
            config_dict['train_data_dir'] = os.path.abspath(
                os.path.join(output_dir, folder, "img")).replace('\\', '/')
            # Update 'output_name' based on folder name.
            config_dict['output_name'] = folder

            # Verify the existence of paths; warn but do not abort.
            required_paths = [
                config_dict.get("ae"),
                config_dict.get("clip_l"),
                config_dict.get("pretrained_model_name_or_path"),
                config_dict.get("t5xxl"),
                config_dict.get("sample_prompts"),
                config_dict.get("output_dir"),
                config_dict.get("logging_dir"),
                config_dict.get("train_data_dir"),
            ]
            missing_paths = [p for p in required_paths
                             if p and p != "X" and not os.path.exists(p)]
            if missing_paths:
                print(f"[WARNING] Some paths do not exist for folder '{folder}':")
                for path in missing_paths:
                    print(f" - {path}")
                print("Please ensure that all necessary files and directories are present.\n")
            else:
                print(f"[INFO] All required paths exist for folder '{folder}'.")

            # Write the configuration in TOML format.
            toml_path = os.path.join(batch_config_dir, f"{folder}.toml")
            try:
                with open(toml_path, 'w', encoding='utf-8') as toml_file:
                    toml.dump(config_dict, toml_file)
                print(f"[SUCCESS] FluxLora configuration file created: {toml_path}")
            except Exception as e:
                print(f"[ERROR] Failed to write FluxLora TOML file '{toml_path}': {e}")
                all_written = False

    print("[INFO] FluxLora configuration completed.\n")
    return all_written
def process_sdxl_nude(project_root):
    """Generate an SDXL training TOML for each folder under output/.

    For every sub-folder of ``<project_root>/output`` this ensures the
    model/log/img subdirectories exist, points the base template's output,
    logging and training-data paths at the folder, overrides the pretrained
    model path, warns about missing paths, writes
    ``BatchConfig/Nude/<folder>.toml``, and finally re-checks that all
    required subdirectories exist on disk.

    Args:
        project_root: Absolute path of the project's root directory.

    Returns:
        True if the base config parsed and every TOML file was written
        (or there were no folders to process); False otherwise.
    """
    print("[INFO] Starting SDXLNude configuration...")
    # ------------------------------
    # Initial Setup for SDXLNude
    # ------------------------------
    output_dir = os.path.join(project_root, "output")
    batch_config_dir = os.path.join(project_root, "BatchConfig", "Nude")
    os.makedirs(batch_config_dir, exist_ok=True)
    sample_prompts_path = os.path.join(project_root, "sample_prompts.txt")

    # Base configuration template; placeholder dirs are overwritten per folder.
    base_config_nude = """
adaptive_noise_scale = 0
bucket_no_upscale = true
bucket_reso_steps = 64
cache_latents = true
cache_latents_to_disk = true
caption_dropout_every_n_epochs = 0
caption_dropout_rate = 0
clip_skip = 1
dynamo_backend = "no"
max_train_epochs = 10
full_bf16 = true
gradient_accumulation_steps = 1
gradient_checkpointing = true
huber_c = 0.1
huber_schedule = "snr"
ip_noise_gamma = 0.1
keep_tokens = 0
learning_rate = 8e-6
learning_rate_te1 = 3e-6
learning_rate_te2 = 0
logging_dir = "C:/Users/newec/Documents/sepuedeborrar"
loss_type = "l2"
lr_scheduler = "constant"
lr_scheduler_args = []
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1
lr_warmup_steps = 0
max_bucket_reso = 2048
max_data_loader_n_workers = 0
max_timestep = 1000
max_token_length = 75
min_bucket_reso = 256
mixed_precision = "bf16"
multires_noise_discount = 0
multires_noise_iterations = 0
noise_offset = 0.0357
noise_offset_type = "Original"
optimizer_args = [ "scale_parameter=False", "relative_step=False", "warmup_init=False",
"weight_decay=0.01",]
optimizer_type = "Adafactor"
output_dir = "C:/Users/newec/Documents/sepuedeborrar"
output_name = "indaX"
persistent_data_loader_workers = 0
pretrained_model_name_or_path = "models/NudeSDXLModel.safetensors"
prior_loss_weight = 1
resolution = "1024,1280"
sample_every_n_steps = 250
sample_prompts = ""
sample_sampler = "euler_a"
save_model_as = "safetensors"
save_precision = "bf16"
train_batch_size = 1
train_data_dir = "C:/Users/newec/Documents/sepuedeborrar"
train_text_encoder = true
vae = "stabilityai/sdxl-vae"
vae_batch_size = 2
wandb_run_name = "X"
"""
    try:
        config_dict_base_nude = toml.loads(base_config_nude)
    except toml.TomlDecodeError as e:
        print(f"[ERROR] Failed to parse SDXLNude base configuration: {e}")
        return False

    # Update the 'sample_prompts' key with the absolute path.
    config_dict_base_nude["sample_prompts"] = sample_prompts_path.replace('\\', '/')

    # Get list of folders in output directory.
    try:
        folders = [f for f in os.listdir(output_dir)
                   if os.path.isdir(os.path.join(output_dir, f))]
    except FileNotFoundError:
        print(f"[WARNING] The directory '{output_dir}' does not exist.")
        folders = []

    # BUGFIX: previously the function returned True even when writing a
    # TOML file failed; track write failures so callers see them.
    all_written = True
    if not folders:
        print(f"[INFO] No folders found in '{output_dir}'.")
    else:
        for folder in folders:
            # Create necessary subdirectories.
            for subdir in ("model", "log", "img"):
                subdir_path = os.path.join(output_dir, folder, subdir)
                os.makedirs(subdir_path, exist_ok=True)
                print(f"[INFO] Subdirectory '{subdir_path}' ensured.")

            # Work on a copy so the base configuration stays pristine.
            config_dict = config_dict_base_nude.copy()

            # NOTE: the template's relative model path is deliberately
            # overridden with the models/trainX/ location (matching the
            # other generators), not resolved from the template value.
            config_dict["pretrained_model_name_or_path"] = os.path.abspath(
                os.path.join(project_root, "models/trainX/NudeSDXLModel.safetensors")
            ).replace('\\', '/')

            # Point output/log/training-data locations at this folder.
            config_dict['output_dir'] = os.path.abspath(
                os.path.join(output_dir, folder, "model")).replace('\\', '/')
            config_dict['output_name'] = folder
            config_dict['logging_dir'] = os.path.abspath(
                os.path.join(output_dir, folder, "log")).replace('\\', '/')
            config_dict['train_data_dir'] = os.path.abspath(
                os.path.join(output_dir, folder, "img")).replace('\\', '/')

            # Verify the existence of paths; warn but do not abort.
            required_paths = [
                config_dict.get("pretrained_model_name_or_path"),
                config_dict.get("sample_prompts"),
                config_dict.get("output_dir"),
                config_dict.get("logging_dir"),
                config_dict.get("train_data_dir"),
            ]
            missing_paths = [p for p in required_paths if p and not os.path.exists(p)]
            if missing_paths:
                print(f"[WARNING] Some paths do not exist for folder '{folder}':")
                for path in missing_paths:
                    print(f" - {path}")
                print("Please ensure that all necessary files and directories are present.\n")
            else:
                print(f"[INFO] All required paths exist for folder '{folder}'.")

            # Write the configuration in TOML format.
            toml_path = os.path.join(batch_config_dir, f"{folder}.toml")
            try:
                with open(toml_path, 'w', encoding='utf-8') as toml_file:
                    toml.dump(config_dict, toml_file)
                print(f"[SUCCESS] SDXLNude configuration file created: {toml_path}\n")
            except Exception as e:
                print(f"[ERROR] Failed to write SDXLNude TOML file '{toml_path}': {e}\n")
                all_written = False
        print("[INFO] All SDXLNude configuration files have been generated successfully.\n")

    # Verify required subdirectories actually exist on disk.
    if folders:
        # BUGFIX: renamed from the misleading 'all_missing' — this flag is
        # set when ANY required directory is missing.
        any_missing = False
        for folder in folders:
            for path in (
                os.path.join(output_dir, folder, "model"),
                os.path.join(output_dir, folder, "log"),
                os.path.join(output_dir, folder, "img"),
            ):
                if not os.path.exists(path):
                    print(f"[ERROR] Required subdirectory '{path}' is missing.")
                    any_missing = True
        if any_missing:
            print("\n[ERROR] Some required directories are missing. Please ensure all directories are correctly set up.")
        else:
            print("\n[INFO] All required directories are present.")

    print("[INFO] SDXLNude configuration completed.\n")
    return all_written
def main():
    """Run the three configuration generators in order and report status."""
    # The project root is the directory containing this script.
    project_root = os.path.abspath(os.path.dirname(__file__))

    # Each generator shares the same call/report pattern, so drive them
    # from a table instead of three copy-pasted stanzas.
    generators = (
        ("FluxCheckpoint", process_flux_checkpoint),
        ("FluxLora", process_flux_lora),
        ("SDXLNude", process_sdxl_nude),
    )

    outcomes = []
    for label, generator in generators:
        succeeded = generator(project_root)
        outcomes.append(succeeded)
        if succeeded:
            print(f"[SUCCESS] {label} executed successfully.\n")
        else:
            print(f"[FAILURE] {label} encountered errors.\n")

    # Final status.
    if all(outcomes):
        print("[INFO] All configurations have been executed successfully.")
    else:
        print("[INFO] Some configurations encountered errors. Please check the logs above.")


if __name__ == "__main__":
    main()