Sri2901 committed
Commit 1d24e84 · verified · 1 Parent(s): ef00e83

Upload model via CLI

Files changed (49)
  1. .job_config.json +133 -0
  2. README.md +54 -0
  3. backpack_poses.safetensors +3 -0
  4. backpack_poses_000000250.safetensors +3 -0
  5. backpack_poses_000000500.safetensors +3 -0
  6. backpack_poses_000000750.safetensors +3 -0
  7. backpack_poses_000001000.safetensors +3 -0
  8. backpack_poses_000001250.safetensors +3 -0
  9. backpack_poses_000001500.safetensors +3 -0
  10. backpack_poses_000001750.safetensors +3 -0
  11. config.yaml +115 -0
  12. log.txt +0 -0
  13. optimizer.pt +3 -0
  14. samples/1756374345775__000000000_0.jpg +0 -0
  15. samples/1756374363368__000000000_1.jpg +0 -0
  16. samples/1756374381265__000000000_2.jpg +0 -0
  17. samples/1756374399064__000000000_3.jpg +0 -0
  18. samples/1756375298906__000000250_0.jpg +0 -0
  19. samples/1756375316884__000000250_1.jpg +0 -0
  20. samples/1756375334854__000000250_2.jpg +0 -0
  21. samples/1756375353333__000000250_3.jpg +0 -0
  22. samples/1756376207911__000000500_0.jpg +0 -0
  23. samples/1756376225865__000000500_1.jpg +0 -0
  24. samples/1756376243828__000000500_2.jpg +0 -0
  25. samples/1756376261779__000000500_3.jpg +0 -0
  26. samples/1756377090990__000000750_0.jpg +0 -0
  27. samples/1756377109045__000000750_1.jpg +0 -0
  28. samples/1756377127054__000000750_2.jpg +0 -0
  29. samples/1756377145099__000000750_3.jpg +0 -0
  30. samples/1756377982836__000001000_0.jpg +0 -0
  31. samples/1756378000801__000001000_1.jpg +0 -0
  32. samples/1756378018804__000001000_2.jpg +0 -0
  33. samples/1756378036823__000001000_3.jpg +0 -0
  34. samples/1756378886491__000001250_0.jpg +0 -0
  35. samples/1756378904513__000001250_1.jpg +0 -0
  36. samples/1756378922533__000001250_2.jpg +0 -0
  37. samples/1756378940577__000001250_3.jpg +0 -0
  38. samples/1756379777687__000001500_0.jpg +0 -0
  39. samples/1756379795729__000001500_1.jpg +0 -0
  40. samples/1756379813791__000001500_2.jpg +0 -0
  41. samples/1756379831902__000001500_3.jpg +0 -0
  42. samples/1756380653427__000001750_0.jpg +0 -0
  43. samples/1756380671561__000001750_1.jpg +0 -0
  44. samples/1756380689581__000001750_2.jpg +0 -0
  45. samples/1756380707690__000001750_3.jpg +0 -0
  46. samples/1756381577140__000002000_0.jpg +0 -0
  47. samples/1756381595151__000002000_1.jpg +0 -0
  48. samples/1756381613125__000002000_2.jpg +0 -0
  49. samples/1756381631089__000002000_3.jpg +0 -0
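The commit message says the upload was done via the CLI. As a rough sketch of how an upload like this one is typically performed, the snippet below uses the `huggingface_hub` Python API (the CLI wrapper is `huggingface-cli upload`); the repo id and local folder path are illustrative assumptions, not taken from the commit.

```py
# Minimal sketch of the kind of upload this commit records, using the
# huggingface_hub Python API. The repo id and local folder are assumptions.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` / HF_TOKEN
api.upload_folder(
    folder_path="/ai-toolkit/output/backpack_poses",  # assumed local training output
    repo_id="Sri2901/backpack_poses",                 # assumed target repo
    repo_type="model",
    commit_message="Upload model via CLI",
)
```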
.job_config.json ADDED
@@ -0,0 +1,133 @@
+{
+  "job": "extension",
+  "config": {
+    "name": "backpack_poses",
+    "process": [
+      {
+        "type": "ui_trainer",
+        "training_folder": "/ai-toolkit/output",
+        "sqlite_db_path": "/ai-toolkit/aitk_db.db",
+        "device": "cuda",
+        "trigger_word": "b@ckp@se",
+        "performance_log_every": 10,
+        "network": {
+          "type": "lora",
+          "linear": 32,
+          "linear_alpha": 32,
+          "conv": 16,
+          "conv_alpha": 16,
+          "lokr_full_rank": true,
+          "lokr_factor": -1,
+          "network_kwargs": {
+            "ignore_if_contains": []
+          }
+        },
+        "save": {
+          "dtype": "bf16",
+          "save_every": 250,
+          "max_step_saves_to_keep": 8,
+          "save_format": "diffusers",
+          "push_to_hub": false
+        },
+        "datasets": [
+          {
+            "folder_path": "/ai-toolkit/datasets/backpack_poses",
+            "control_path": null,
+            "mask_path": null,
+            "mask_min_value": 0.1,
+            "default_caption": "",
+            "caption_ext": "txt",
+            "caption_dropout_rate": 0.05,
+            "cache_latents_to_disk": false,
+            "is_reg": false,
+            "network_weight": 1,
+            "resolution": [
+              512,
+              768,
+              1024,
+              1280,
+              1536
+            ],
+            "controls": [],
+            "shrink_video_to_frames": true,
+            "num_frames": 1,
+            "do_i2v": true,
+            "flip_x": false,
+            "flip_y": false
+          }
+        ],
+        "train": {
+          "batch_size": 1,
+          "bypass_guidance_embedding": false,
+          "steps": 2000,
+          "gradient_accumulation": 1,
+          "train_unet": true,
+          "train_text_encoder": false,
+          "gradient_checkpointing": true,
+          "noise_scheduler": "flowmatch",
+          "optimizer": "adamw8bit",
+          "timestep_type": "sigmoid",
+          "content_or_style": "balanced",
+          "optimizer_params": {
+            "weight_decay": 0.0001
+          },
+          "unload_text_encoder": false,
+          "cache_text_embeddings": false,
+          "lr": 0.0002,
+          "ema_config": {
+            "use_ema": true,
+            "ema_decay": 0.99
+          },
+          "skip_first_sample": false,
+          "disable_sampling": false,
+          "dtype": "bf16",
+          "diff_output_preservation": false,
+          "diff_output_preservation_multiplier": 1,
+          "diff_output_preservation_class": "person",
+          "switch_boundary_every": 1
+        },
+        "model": {
+          "name_or_path": "black-forest-labs/FLUX.1-dev",
+          "quantize": false,
+          "qtype": "qfloat8",
+          "quantize_te": true,
+          "qtype_te": "qfloat8",
+          "arch": "flux",
+          "low_vram": false,
+          "model_kwargs": {}
+        },
+        "sample": {
+          "sampler": "flowmatch",
+          "sample_every": 250,
+          "width": 1024,
+          "height": 1024,
+          "samples": [
+            {
+              "prompt": "man standing three-quarter facing away, wearing a light beige sweater and black pants, carrying a small black crossbody bag diagonally across his torso with the bag resting at his side, left hand in pocket, expression neutral while glancing back over his shoulder"
+            },
+            {
+              "prompt": "man standing side-facing, dressed in a black blazer and grey trousers, holding a patterned duffel bag in his right hand at thigh level, bag structured with dual handles, expression neutral, gaze directed slightly towards camera"
+            },
+            {
+              "prompt": "man standing front-facing at slight angle, wearing a beige button-up jacket and brown trousers, carrying a black crossbody bag with strap resting on left shoulder, bag positioned at waist level, looking ahead with neutral expression"
+            },
+            {
+              "prompt": "man standing in partial side view, holding a grey Wildcraft duffle bag with neon green accents in his right hand at thigh level, wearing a light blue jacket, grey shorts, and white sneakers, face not visible"
+            }
+          ],
+          "neg": "",
+          "seed": 42,
+          "walk_seed": true,
+          "guidance_scale": 3,
+          "sample_steps": 30,
+          "num_frames": 1,
+          "fps": 1
+        }
+      }
+    ]
+  },
+  "meta": {
+    "name": "[name]",
+    "version": "1.0"
+  }
+}
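The job config above is what drives the ai-toolkit run: a rank-32 LoRA on FLUX.1-dev, trained for 2000 steps at lr 2e-4, with checkpoints and samples every 250 steps. A minimal sketch for pulling those hyperparameters back out of the file (standard library only; the local path is an assumption):

```py
# Minimal sketch: extract the key training hyperparameters from the job
# config shown above. The relative file path is an assumption.
import json

with open(".job_config.json") as f:
    job = json.load(f)

proc = job["config"]["process"][0]
print("trigger word:", proc["trigger_word"])            # b@ckp@se
print("LoRA rank/alpha:", proc["network"]["linear"],
      "/", proc["network"]["linear_alpha"])             # 32 / 32
print("steps:", proc["train"]["steps"])                 # 2000
print("lr:", proc["train"]["lr"])                       # 0.0002
print("base model:", proc["model"]["name_or_path"])     # black-forest-labs/FLUX.1-dev
```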
README.md ADDED
@@ -0,0 +1,54 @@
+---
+tags:
+- text-to-image
+- flux
+- lora
+- diffusers
+- template:sd-lora
+- ai-toolkit
+base_model: black-forest-labs/FLUX.1-dev
+instance_prompt: b@ckp@se
+license: other
+license_name: flux-1-dev-non-commercial-license
+license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
+widget:
+- text: Sample generation
+  output:
+    url: samples/1756374345775__000000000_0.jpg
+- text: Sample generation
+  output:
+    url: samples/1756374363368__000000000_1.jpg
+- text: Sample generation
+  output:
+    url: samples/1756374381265__000000000_2.jpg
+---
+
+# backpack_poses
+
+Model trained with AI Toolkit by Ostris
+
+<Gallery />
+
+## Trigger words
+
+You should use `b@ckp@se` to trigger the image generation.
+
+## Download model and use it with ComfyUI, AUTOMATIC1111, SD.Next, Invoke AI, etc.
+
+Weights for this model are available in Safetensors format.
+
+[Download](/username/backpack_poses/tree/main) them in the Files & versions tab.
+
+## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
+
+```py
+from diffusers import AutoPipelineForText2Image
+import torch
+
+pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.bfloat16).to('cuda')
+pipeline.load_lora_weights('username/backpack_poses', weight_name='backpack_poses_000000250.safetensors')
+image = pipeline('b@ckp@se style artwork').images[0]
+image.save("my_image.png")
+```
+
+For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
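The README snippet loads the 250-step checkpoint. A hedged variation below loads the final `backpack_poses.safetensors` instead and fuses it into the base weights at reduced strength via diffusers' `fuse_lora`; `username/backpack_poses` is kept as the README's own placeholder repo id, and the prompt is illustrative.

```py
# Hedged sketch: load the final adapter rather than an intermediate
# checkpoint, then bake it into the base model at 0.8 strength.
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")
pipeline.load_lora_weights("username/backpack_poses", weight_name="backpack_poses.safetensors")
pipeline.fuse_lora(lora_scale=0.8)  # fold the LoRA into the base weights
image = pipeline("b@ckp@se, man carrying a black crossbody bag").images[0]
image.save("fused_sample.png")
```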
backpack_poses.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8dc05e9ebc093d05697be80e178b6e0c6e96e6fe1f314062539a6637092ca4fe
+size 343806440
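Each weights file is stored through Git LFS, so the diff only shows a pointer recording the blob's sha256 and byte size. Those two fields are enough to verify a download locally; a minimal sketch using only the standard library (the local filename is an assumption):

```py
# Minimal sketch: check a downloaded checkpoint against the sha256 and size
# recorded in its LFS pointer above.
import hashlib
from pathlib import Path

path = Path("backpack_poses.safetensors")
expected_oid = "8dc05e9ebc093d05697be80e178b6e0c6e96e6fe1f314062539a6637092ca4fe"
expected_size = 343806440

assert path.stat().st_size == expected_size, "size mismatch"
h = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("checkpoint matches its LFS pointer")
```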
backpack_poses_000000250.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e483bf7b57f2c5f883b9baf28884a98894b0e83257c0588c5bd651574db499d8
+size 343806440
backpack_poses_000000500.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fa23a096d2110e44bc5265acc60ea0ef59ab52a32ce958a371853208a7fecfc
+size 343806440
backpack_poses_000000750.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a69a71e8606369173221450c368d172a3b83881366642d148494697e53af432c
+size 343806440
backpack_poses_000001000.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dfab7411b95444d959e0e3c55c79ab035dcea488046f7f1ad3737eb97498b99
+size 343806440
backpack_poses_000001250.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8dceea38cbd80c1568a1fd2583b672039e8059febe1a66d8adaec3eb6d13b13
+size 343806440
backpack_poses_000001500.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90ae62da6928b63d973156b50ca8aa9eabf2c9d25479459f1e4f18470c3391cf
+size 343806440
backpack_poses_000001750.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12c7360dc80b354166cc26bbc420f5c724bde61b2f7e35b1b24f5173b23173d7
+size 343806440
config.yaml ADDED
@@ -0,0 +1,115 @@
+job: extension
+config:
+  name: backpack_poses
+  process:
+  - type: ui_trainer
+    training_folder: /ai-toolkit/output
+    sqlite_db_path: /ai-toolkit/aitk_db.db
+    device: cuda
+    trigger_word: b@ckp@se
+    performance_log_every: 10
+    network:
+      type: lora
+      linear: 32
+      linear_alpha: 32
+      conv: 16
+      conv_alpha: 16
+      lokr_full_rank: true
+      lokr_factor: -1
+      network_kwargs:
+        ignore_if_contains: []
+    save:
+      dtype: bf16
+      save_every: 250
+      max_step_saves_to_keep: 8
+      save_format: diffusers
+      push_to_hub: false
+    datasets:
+    - folder_path: /ai-toolkit/datasets/backpack_poses
+      control_path: null
+      mask_path: null
+      mask_min_value: 0.1
+      default_caption: ''
+      caption_ext: txt
+      caption_dropout_rate: 0.05
+      cache_latents_to_disk: false
+      is_reg: false
+      network_weight: 1
+      resolution:
+      - 512
+      - 768
+      - 1024
+      - 1280
+      - 1536
+      controls: []
+      shrink_video_to_frames: true
+      num_frames: 1
+      do_i2v: true
+      flip_x: false
+      flip_y: false
+    train:
+      batch_size: 1
+      bypass_guidance_embedding: false
+      steps: 2000
+      gradient_accumulation: 1
+      train_unet: true
+      train_text_encoder: false
+      gradient_checkpointing: true
+      noise_scheduler: flowmatch
+      optimizer: adamw8bit
+      timestep_type: sigmoid
+      content_or_style: balanced
+      optimizer_params:
+        weight_decay: 0.0001
+      unload_text_encoder: false
+      cache_text_embeddings: false
+      lr: 0.0002
+      ema_config:
+        use_ema: true
+        ema_decay: 0.99
+      skip_first_sample: false
+      disable_sampling: false
+      dtype: bf16
+      diff_output_preservation: false
+      diff_output_preservation_multiplier: 1
+      diff_output_preservation_class: person
+      switch_boundary_every: 1
+    model:
+      name_or_path: black-forest-labs/FLUX.1-dev
+      quantize: false
+      qtype: qfloat8
+      quantize_te: true
+      qtype_te: qfloat8
+      arch: flux
+      low_vram: false
+      model_kwargs: {}
+    sample:
+      sampler: flowmatch
+      sample_every: 250
+      width: 1024
+      height: 1024
+      samples:
+      - prompt: man standing three-quarter facing away, wearing a light beige sweater
+          and black pants, carrying a small black crossbody bag diagonally across
+          his torso with the bag resting at his side, left hand in pocket, expression
+          neutral while glancing back over his shoulder
+      - prompt: man standing side-facing, dressed in a black blazer and grey trousers,
+          holding a patterned duffel bag in his right hand at thigh level, bag structured
+          with dual handles, expression neutral, gaze directed slightly towards camera
+      - prompt: man standing front-facing at slight angle, wearing a beige button-up
+          jacket and brown trousers, carrying a black crossbody bag with strap resting
+          on left shoulder, bag positioned at waist level, looking ahead with neutral
+          expression
+      - prompt: man standing in partial side view, holding a grey Wildcraft duffle
+          bag with neon green accents in his right hand at thigh level, wearing a
+          light blue jacket, grey shorts, and white sneakers, face not visible
+      neg: ''
+      seed: 42
+      walk_seed: true
+      guidance_scale: 3
+      sample_steps: 30
+      num_frames: 1
+      fps: 1
+meta:
+  name: backpack_poses
+  version: '1.0'
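config.yaml mirrors the job config in YAML form, and its schedule explains the artifact counts in this commit: samples at step 0 and then every 250 steps up to 2000 gives 9 rounds of 4 prompts (the 36 images under samples/), while saving every 250 steps with `max_step_saves_to_keep: 8` leaves the 8 numbered checkpoints. A small sketch deriving that, assuming PyYAML is installed:

```py
# Minimal sketch: read config.yaml and derive the expected artifact counts
# from the sampling/saving schedule it encodes.
import yaml

with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

proc = cfg["config"]["process"][0]
steps = proc["train"]["steps"]                    # 2000
sample_every = proc["sample"]["sample_every"]     # 250
n_prompts = len(proc["sample"]["samples"])        # 4
save_every = proc["save"]["save_every"]           # 250
keep = proc["save"]["max_step_saves_to_keep"]     # 8

rounds = steps // sample_every + 1                # step 0 plus every 250 steps = 9
print("sample images:", rounds * n_prompts)       # 9 * 4 = 36, matching samples/
print("kept checkpoints:", min(steps // save_every, keep))  # 8 numbered files
```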
log.txt ADDED
The diff for this file is too large to render. See raw diff
 
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfce26fc099250cb394cb364879d4b3c15ec4040a9cbab5209a8c6935f40a94a
+size 350203413
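optimizer.pt carries the optimizer state (adamw8bit per the config) so a run can resume mid-training. The payload layout is internal to the trainer, so the hedged sketch below only inspects the top-level structure after a CPU load:

```py
# Hedged sketch: peek at the structure of the saved optimizer state without
# assuming anything about its internal layout. CPU load avoids needing a GPU.
import torch

state = torch.load("optimizer.pt", map_location="cpu", weights_only=False)
if isinstance(state, dict):
    print("top-level keys:", list(state.keys()))
else:
    print("payload type:", type(state).__name__)
```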
samples/1756374345775__000000000_0.jpg ADDED
samples/1756374363368__000000000_1.jpg ADDED
samples/1756374381265__000000000_2.jpg ADDED
samples/1756374399064__000000000_3.jpg ADDED
samples/1756375298906__000000250_0.jpg ADDED
samples/1756375316884__000000250_1.jpg ADDED
samples/1756375334854__000000250_2.jpg ADDED
samples/1756375353333__000000250_3.jpg ADDED
samples/1756376207911__000000500_0.jpg ADDED
samples/1756376225865__000000500_1.jpg ADDED
samples/1756376243828__000000500_2.jpg ADDED
samples/1756376261779__000000500_3.jpg ADDED
samples/1756377090990__000000750_0.jpg ADDED
samples/1756377109045__000000750_1.jpg ADDED
samples/1756377127054__000000750_2.jpg ADDED
samples/1756377145099__000000750_3.jpg ADDED
samples/1756377982836__000001000_0.jpg ADDED
samples/1756378000801__000001000_1.jpg ADDED
samples/1756378018804__000001000_2.jpg ADDED
samples/1756378036823__000001000_3.jpg ADDED
samples/1756378886491__000001250_0.jpg ADDED
samples/1756378904513__000001250_1.jpg ADDED
samples/1756378922533__000001250_2.jpg ADDED
samples/1756378940577__000001250_3.jpg ADDED
samples/1756379777687__000001500_0.jpg ADDED
samples/1756379795729__000001500_1.jpg ADDED
samples/1756379813791__000001500_2.jpg ADDED
samples/1756379831902__000001500_3.jpg ADDED
samples/1756380653427__000001750_0.jpg ADDED
samples/1756380671561__000001750_1.jpg ADDED
samples/1756380689581__000001750_2.jpg ADDED
samples/1756380707690__000001750_3.jpg ADDED
samples/1756381577140__000002000_0.jpg ADDED
samples/1756381595151__000002000_1.jpg ADDED
samples/1756381613125__000002000_2.jpg ADDED
samples/1756381631089__000002000_3.jpg ADDED
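The sample images follow a visible naming pattern, `<unix-ms-timestamp>__<step>_<index>.jpg`. A minimal sketch grouping them by training step (run from the repo root; expects 4 images per step):

```py
# Minimal sketch: group the sample images by the training step encoded in
# their filenames, relying on the <timestamp>__<step>_<index>.jpg pattern.
import re
from collections import defaultdict
from pathlib import Path

pattern = re.compile(r"(?P<ts>\d+)__(?P<step>\d+)_(?P<idx>\d+)\.jpg")
by_step = defaultdict(list)
for path in sorted(Path("samples").glob("*.jpg")):
    m = pattern.fullmatch(path.name)
    if m:
        by_step[int(m.group("step"))].append(path.name)

for step in sorted(by_step):
    print(f"step {step:>4}: {len(by_step[step])} images")  # expect 4 per step
```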