End of training
Browse files
README.md
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
---
|
| 3 |
+
license: creativeml-openrail-m
|
| 4 |
+
base_model: /mnt/blob/stable-diffusion-xl-base-1.0
|
| 5 |
+
dataset: lambdalabs/pokemon-blip-captions
|
| 6 |
+
tags:
|
| 7 |
+
- stable-diffusion-xl
|
| 8 |
+
- stable-diffusion-xl-diffusers
|
| 9 |
+
- text-to-image
|
| 10 |
+
- diffusers
|
| 11 |
+
- lora
|
| 12 |
+
inference: true
|
| 13 |
+
---
|
| 14 |
+
|
| 15 |
+
# LoRA text2image fine-tuning - Chenhsing/sd-pokemon-model-lora-sdxl
|
| 16 |
+
|
| 17 |
+
These are LoRA adaptation weights for /mnt/blob/stable-diffusion-xl-base-1.0. The weights were fine-tuned on the lambdalabs/pokemon-blip-captions dataset. You can find some example images below.
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
LoRA for the text encoder was enabled: False.
|
| 22 |
+
|
| 23 |
+
Special VAE used for training: madebyollin/sdxl-vae-fp16-fix.
|
logs/text2image-fine-tune/1693990583.324526/events.out.tfevents.1693990583.PHLRR4032.5119.1
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b56ffef6f62b3e5cd09abdacd0103313d71406a2c1400d239b60d18cc34616a0
|
| 3 |
+
size 2402
|
logs/text2image-fine-tune/1693990583.4341908/hparams.yml
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
adam_beta1: 0.9
|
| 2 |
+
adam_beta2: 0.999
|
| 3 |
+
adam_epsilon: 1.0e-08
|
| 4 |
+
adam_weight_decay: 0.01
|
| 5 |
+
allow_tf32: false
|
| 6 |
+
cache_dir: null
|
| 7 |
+
caption_column: text
|
| 8 |
+
center_crop: false
|
| 9 |
+
checkpointing_steps: 500
|
| 10 |
+
checkpoints_total_limit: null
|
| 11 |
+
dataloader_num_workers: 0
|
| 12 |
+
dataset_config_name: null
|
| 13 |
+
dataset_name: lambdalabs/pokemon-blip-captions
|
| 14 |
+
enable_xformers_memory_efficient_attention: false
|
| 15 |
+
gradient_accumulation_steps: 1
|
| 16 |
+
gradient_checkpointing: false
|
| 17 |
+
hub_model_id: null
|
| 18 |
+
hub_token: null
|
| 19 |
+
image_column: image
|
| 20 |
+
learning_rate: 0.0001
|
| 21 |
+
local_rank: 0
|
| 22 |
+
logging_dir: logs
|
| 23 |
+
lr_scheduler: constant
|
| 24 |
+
lr_warmup_steps: 0
|
| 25 |
+
max_grad_norm: 1.0
|
| 26 |
+
max_train_samples: null
|
| 27 |
+
max_train_steps: 210
|
| 28 |
+
mixed_precision: fp16
|
| 29 |
+
noise_offset: 0
|
| 30 |
+
num_train_epochs: 2
|
| 31 |
+
num_validation_images: 4
|
| 32 |
+
output_dir: sd-pokemon-model-lora-sdxl
|
| 33 |
+
prediction_type: null
|
| 34 |
+
pretrained_model_name_or_path: /mnt/blob/stable-diffusion-xl-base-1.0
|
| 35 |
+
pretrained_vae_model_name_or_path: madebyollin/sdxl-vae-fp16-fix
|
| 36 |
+
push_to_hub: true
|
| 37 |
+
random_flip: true
|
| 38 |
+
rank: 4
|
| 39 |
+
report_to: tensorboard
|
| 40 |
+
resolution: 1024
|
| 41 |
+
resume_from_checkpoint: null
|
| 42 |
+
revision: null
|
| 43 |
+
scale_lr: false
|
| 44 |
+
seed: 42
|
| 45 |
+
snr_gamma: null
|
| 46 |
+
train_batch_size: 1
|
| 47 |
+
train_data_dir: null
|
| 48 |
+
train_text_encoder: false
|
| 49 |
+
use_8bit_adam: false
|
| 50 |
+
validation_epochs: 1
|
| 51 |
+
validation_prompt: null
|
logs/text2image-fine-tune/events.out.tfevents.1693990583.PHLRR4032.5119.0
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a70d343fbdcd8b354e7fbcbda0f9bce0e33509f3a3af049c23603a00662ea346
|
| 3 |
+
size 10251
|
pytorch_lora_weights.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8656c5cf3d60029636885f126c367cee40f8b8f0234054990e6db5de54846b39
|
| 3 |
+
size 23401064
|