Update train.py
train.py
CHANGED
@@ -3,72 +3,93 @@
 import os
 import torch
 from huggingface_hub import snapshot_download
-from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
 from peft import LoraConfig, get_peft_model
 
-#
+# 1️⃣ Pick your scheduler class
+from diffusers import (
+    StableDiffusionPipeline,
+    DPMSolverMultistepScheduler,
+    UNet2DConditionModel,
+    AutoencoderKL,
+)
+from transformers import CLIPTextModel, CLIPTokenizer
 
-#
-DATA_DIR = os.getenv("DATA_DIR", "./data")
+# ─── 1) CONFIG ────────────────────────────────────────────────────────────────
 
-
+DATA_DIR = os.getenv("DATA_DIR", "./data")
 MODEL_DIR = os.getenv("MODEL_DIR", "./hidream-model")
-
-# Where to save your LoRA-fine-tuned model
 OUTPUT_DIR = os.getenv("OUTPUT_DIR", "./lora-trained")
 
-#
-
-print(f"📂 Loading dataset from: {DATA_DIR}")
-print("📥 Fetching or verifying base model: HiDream-ai/HiDream-I1-Dev")
+# ─── 2) DOWNLOAD OR VERIFY BASE MODEL ─────────────────────────────────────────
 
-# If you've pre-downloaded into MODEL_DIR, just use it; otherwise pull from HF Hub
 if not os.path.isdir(MODEL_DIR):
     MODEL_DIR = snapshot_download(
         repo_id="HiDream-ai/HiDream-I1-Dev",
         local_dir=MODEL_DIR
     )
 
-#
+# ─── 3) LOAD EACH PIPELINE COMPONENT ──────────────────────────────────────────
 
-#
-# but your installed version doesn't have that class. Instead we
-# force-load DPMSolverMultistepScheduler via `from_pretrained`.
-print(f"🔄 Loading scheduler from: {MODEL_DIR}/scheduler")
+# 3a) Scheduler
 scheduler = DPMSolverMultistepScheduler.from_pretrained(
-
+    MODEL_DIR,
     subfolder="scheduler"
 )
 
-#
+# 3b) VAE
+vae = AutoencoderKL.from_pretrained(
+    MODEL_DIR,
+    subfolder="vae",
+    torch_dtype=torch.float16
+).to("cuda")
+
+# 3c) Text encoder + tokenizer
+text_encoder = CLIPTextModel.from_pretrained(
+    MODEL_DIR,
+    subfolder="text_encoder",
+    torch_dtype=torch.float16
+).to("cuda")
+tokenizer = CLIPTokenizer.from_pretrained(
+    MODEL_DIR,
+    subfolder="tokenizer"
+)
+
+# 3d) U-Net
+unet = UNet2DConditionModel.from_pretrained(
+    MODEL_DIR,
+    subfolder="unet",
+    torch_dtype=torch.float16
+).to("cuda")
+
+# ─── 4) BUILD THE PIPELINE ────────────────────────────────────────────────────
 
-
-pipe = StableDiffusionPipeline.from_pretrained(
-    MODEL_DIR,
+pipe = StableDiffusionPipeline(
+    vae=vae,
+    text_encoder=text_encoder,
+    tokenizer=tokenizer,
+    unet=unet,
     scheduler=scheduler,
-    torch_dtype=torch.float16,
 ).to("cuda")
 
-#
+# ─── 5) APPLY LORA ────────────────────────────────────────────────────────────
 
-print("🔧 Configuring LoRA adapter on U-Net")
 lora_config = LoraConfig(
     r=16,
     lora_alpha=16,
     bias="none",
-    task_type="CAUSAL_LM"
+    task_type="CAUSAL_LM",
 )
 pipe.unet = get_peft_model(pipe.unet, lora_config)
 
-#
+# ─── 6) TRAINING LOOP (SIMULATED) ─────────────────────────────────────────────
 
-print("
+print(f"📂 Data at {DATA_DIR}")
 for step in range(100):
-    #
-    print(f"
+    # … your real data loading + optimizer here …
+    print(f"Training step {step+1}/100")
 
-    #
+# ─── 7) SAVE THE FINE-TUNED LoRA ──────────────────────────────────────────────
 
 os.makedirs(OUTPUT_DIR, exist_ok=True)
 pipe.save_pretrained(OUTPUT_DIR)
-print("✅
+print("✅ Done! Saved to", OUTPUT_DIR)
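A note on step 5: PEFT's `task_type="CAUSAL_LM"` is meant for causal language models, and calling `get_peft_model` on a diffusers U-Net without `target_modules` will generally fail, since PEFT cannot infer which layers to adapt. Below is a minimal sketch of the more common pattern, assuming the standard `UNet2DConditionModel` attention projection names (`to_q`, `to_k`, `to_v`, `to_out.0`); this is an editor's illustration, not part of the commit:

lora_config = LoraConfig(
    r=16,
    lora_alpha=16,
    bias="none",
    # No task_type: the U-Net is not a language model. Instead, name the
    # attention projections to wrap with LoRA (standard diffusers names).
    target_modules=["to_q", "to_k", "to_v", "to_out.0"],
)
pipe.unet = get_peft_model(pipe.unet, lora_config)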
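Step 6 only prints, so nothing is optimized and the saved output is the base weights plus untrained LoRA layers. For reference, here is a rough sketch of what one real denoising step could look like with the components loaded above, assuming a hypothetical `dataloader` yielding `pixel_values` and `input_ids` batches; mixed-precision and gradient-accumulation details are omitted:

import torch
import torch.nn.functional as F
from diffusers import DDPMScheduler

# Train-time noise schedule (the DPM-Solver scheduler above is for inference)
noise_scheduler = DDPMScheduler.from_pretrained(MODEL_DIR, subfolder="scheduler")
optimizer = torch.optim.AdamW(
    (p for p in pipe.unet.parameters() if p.requires_grad), lr=1e-4
)

for step, batch in enumerate(dataloader):  # hypothetical dataloader
    # Encode images into scaled VAE latents
    latents = vae.encode(
        batch["pixel_values"].to("cuda", dtype=torch.float16)
    ).latent_dist.sample()
    latents = latents * vae.config.scaling_factor

    # Add noise at a random timestep
    noise = torch.randn_like(latents)
    timesteps = torch.randint(
        0, noise_scheduler.config.num_train_timesteps,
        (latents.shape[0],), device=latents.device,
    )
    noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

    # Text conditioning, then predict the added noise and regress against it
    encoder_hidden_states = text_encoder(batch["input_ids"].to("cuda"))[0]
    noise_pred = pipe.unet(noisy_latents, timesteps, encoder_hidden_states).sample
    loss = F.mse_loss(noise_pred.float(), noise.float())

    loss.backward()
    optimizer.step()
    optimizer.zero_grad()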