LeonardoBenitez commited on
Commit
8f2ca6c
·
verified ·
1 Parent(s): ed8ce00

Upload folder using huggingface_hub

Browse files
models/people_Amelia_Vega_distil_400/README.md ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ hyperparameters:
3
+ lora_r: 16
4
+ lora_alpha: 4
5
+ is_lora_negated: false
6
+ seed: 42
7
+ model_name_or_path: CompVis/stable-diffusion-v1-4
8
+ revision: null
9
+ variant: null
10
+ dataset_forget_name: assets/datasets/lfw_splits_filtered/Amelia_Vega/train_forget
11
+ dataset_retain_name: assets/datasets/lfw_splits_filtered/Amelia_Vega/train_retain
12
+ dataset_forget_config_name: null
13
+ dataset_retain_config_name: null
14
+ image_column: image
15
+ caption_column: text
16
+ validation_prompt: An image of Amelia Vega
17
+ num_validation_images: 1
18
+ validation_epochs: 401
19
+ resolution: 512
20
+ center_crop: false
21
+ random_flip: true
22
+ max_train_samples: null
23
+ dataloader_num_workers: 2
24
+ prediction_type: null
25
+ per_device_train_batch_size: 2
26
+ gradient_accumulation_steps: 2
27
+ num_train_epochs: 400
28
+ learning_rate: 0.0006
29
+ lr_scheduler_type: constant
30
+ should_log: true
31
+ local_rank: -1
32
+ device: cuda
33
+ n_gpu: 1
34
+ output_dir: assets/models/people_Amelia_Vega_distil_400
35
+ cache_dir: null
36
+ hub_token: null
37
+ hub_model_id: null
38
+ logging_dir: logs
39
+ logging_steps: 20
40
+ save_strategy: epoch
41
+ save_total_limit: 2
42
+ gradient_checkpointing: false
43
+ enable_xformers_memory_efficient_attention: false
44
+ mixed_precision: 'no'
45
+ allow_tf32: false
46
+ use_8bit_adam: false
47
+ report_to: tensorboard
48
+ compute_gradient_conflict: false
49
+ compute_runtimes: true
50
+ max_train_steps: 800
51
+ lr_warmup_steps: 0
52
+ adam_beta1: 0.9
53
+ adam_beta2: 0.999
54
+ adam_weight_decay: 0.01
55
+ adam_epsilon: 1.0e-08
56
+ max_grad_norm: 5.0
57
+ checkpointing_steps: 10000
58
+ checkpoints_total_limit: null
59
+ resume_from_checkpoint: null
60
+ noise_offset: 0.0
61
+ overwritting_concept: a child
62
+ model-index:
63
+ - name: None
64
+ results:
65
+ - task:
66
+ type: text-to-image
67
+ dataset:
68
+ name: Forget set
69
+ type: inline-prompts
70
+ metrics:
71
+ - type: clip
72
+ value: 33.31444597244263
73
+ name: ForgetSet clip score of original model mean (~↑)
74
+ - type: clip
75
+ value: 2.6475742292024136
76
+ name: ForgetSet clip score of original model std (~↓)
77
+ - type: clip
78
+ value: 22.83225917816162
79
+ name: ForgetSet clip score of learned model mean (~↑)
80
+ - type: clip
81
+ value: 1.9811977037871389
82
+ name: ForgetSet clip score of learned model std (~↓)
83
+ - type: clip
84
+ value: 22.77885866165161
85
+ name: ForgetSet clip score of unlearned model mean (↓)
86
+ - type: clip
87
+ value: 5.060689691816349
88
+ name: ForgetSet clip score of unlearned model std (~↓)
89
+ - type: clip
90
+ value: 0.053400516510009766
91
+ name: ForgetSet clip score difference between learned and unlearned mean (↑)
92
+ - type: clip
93
+ value: 5.875521913004341
94
+ name: ForgetSet clip score difference between learned and unlearned std (~↓)
95
+ - type: clip
96
+ value: 10.535587310791016
97
+ name: ForgetSet clip score difference between original and unlearned mean (↑)
98
+ - type: clip
99
+ value: 2.966275892543176
100
+ name: ForgetSet clip score difference between original and unlearned std (~↓)
101
+ - type: clip
102
+ value: 10.482186794281006
103
+ name: ForgetSet clip score difference between original and learned mean (↓)
104
+ - type: clip
105
+ value: 4.1239398129472935
106
+ name: ForgetSet clip score difference between original and learned std (~↓)
107
+ - type: clip
108
+ value: 31.09067440032959
109
+ name: RetainSet clip score of original model mean (~↑)
110
+ - type: clip
111
+ value: 2.3280465098326126
112
+ name: RetainSet clip score of original model std (~↓)
113
+ - type: clip
114
+ value: 31.452622890472412
115
+ name: RetainSet clip score of learned model mean (~↓)
116
+ - type: clip
117
+ value: 1.9978701528699392
118
+ name: RetainSet clip score of learned model std (~↓)
119
+ - type: clip
120
+ value: 31.013118743896484
121
+ name: RetainSet clip score of unlearned model mean (↑)
122
+ - type: clip
123
+ value: 2.6018597655159974
124
+ name: RetainSet clip score of unlearned model std (~↓)
125
+ - type: clip
126
+ value: 0.43950414657592773
127
+ name: RetainSet clip score difference between learned and unlearned mean (↓)
128
+ - type: clip
129
+ value: 1.005614097070429
130
+ name: RetainSet clip score difference between learned and unlearned std (~↓)
131
+ - type: clip
132
+ value: 0.07755565643310547
133
+ name: RetainSet clip score difference between original and unlearned mean (↓)
134
+ - type: clip
135
+ value: 0.9441441722740401
136
+ name: RetainSet clip score difference between original and unlearned std (~↓)
137
+ - type: clip
138
+ value: -0.36194849014282227
139
+ name: RetainSet clip score difference between original and learned mean (↑)
140
+ - type: clip
141
+ value: 0.8606901283886421
142
+ name: RetainSet clip score difference between original and learned std (~↓)
143
+ - type: runtime
144
+ value: 7.186659157276154
145
+ name: Inference latency seconds mean (↓)
146
+ - type: runtime
147
+ value: 0.07107438773503902
148
+ name: Inference latency seconds std (~↓)
149
+ - task:
150
+ type: text-to-image
151
+ dataset:
152
+ name: assets/datasets/lfw_splits_filtered/Amelia_Vega/train_forget (forget)
153
+ and assets/datasets/lfw_splits_filtered/Amelia_Vega/train_retain (retain)
154
+ sets
155
+ type: forget-and-retain-together
156
+ metrics:
157
+ - type: runtime
158
+ value: 3.3904929161071777
159
+ name: Runtime init seconds (~↓)
160
+ - type: runtime
161
+ value: 1.118607521057129
162
+ name: Runtime data loading seconds (~↓)
163
+ - type: runtime
164
+ value: 3861.4693851470947
165
+ name: Runtime training seconds (↓)
166
+ - type: runtime
167
+ value: 184.91726994514465
168
+ name: Runtime eval seconds (~↓)
169
+ ---
170
+
171
+ <!-- This model card has been generated automatically according to the information the training script had access to. You
172
+ should probably proofread and complete it, then remove this comment. -->
173
+
174
+
175
+ # LoRA text2image fine-tuning - None
176
+ These are LoRA adaptation weights for CompVis/stable-diffusion-v1-4.
177
+ The weights were fine-tuned for forgetting assets/datasets/lfw_splits_filtered/Amelia_Vega/train_forget dataset, while retaining assets/datasets/lfw_splits_filtered/Amelia_Vega/train_retain.
178
+ You can find some example images in the following.
179
+
180
+ ![img](images/val_prompt_00_01.png)
181
+ ![img](images/tst_prompt_399_01.png)
182
+ ![img](images/Forget - An image of Amelia Vega.png)
183
+ ![img](images/Forget - Photograph of Amelia Vega; high definition.png)
184
+ ![img](images/Forget - An picture of Amelia Vega in the rain.png)
185
+ ![img](images/Forget - An picture of Amelia Vega running.png)
186
+ ![img](images/Retain - An image of a child.png)
187
+ ![img](images/Retain - Photograph of a child; high definition.png)
188
+ ![img](images/Retain - An picture of a child in the rain.png)
189
+ ![img](images/Retain - An picture of a child running.png)
190
+
191
+
192
+
193
+ ## Intended uses & limitations
194
+
195
+ #### How to use
196
+
197
+ ```python
198
+ # TODO: add an example code snippet for running this diffusion pipeline
199
+ ```
200
+
201
+ #### Limitations and bias
202
+
203
+ [TODO: provide examples of latent issues and potential remediations]
204
+
205
+ ## Training details
206
+
207
+ [TODO: describe the data used to train the model]
models/people_Amelia_Vega_distil_400/images/Forget - An image of Amelia Vega.png ADDED

Git LFS Details

  • SHA256: e9a5886c0972ce4e0dea47ddeeb4604543d77997dddbacf404aae04b7769a757
  • Pointer size: 131 Bytes
  • Size of remote file: 841 kB
models/people_Amelia_Vega_distil_400/images/Forget - An picture of Amelia Vega in the rain.png ADDED

Git LFS Details

  • SHA256: 214e380a3126fc79aa2d0ce8de655de6462858f8a90c67e21cccf0a249f20537
  • Pointer size: 131 Bytes
  • Size of remote file: 833 kB
models/people_Amelia_Vega_distil_400/images/Forget - An picture of Amelia Vega running.png ADDED

Git LFS Details

  • SHA256: e32be749db2d69ea946af7cb32ced6f113e1603a4d7821f2e04d5f1b776b3733
  • Pointer size: 131 Bytes
  • Size of remote file: 725 kB
models/people_Amelia_Vega_distil_400/images/Forget - Photograph of Amelia Vega; high definition.png ADDED

Git LFS Details

  • SHA256: 82b31fe4c6d0b716deeb64d0d183ebd1c447bf611f9b1c3b8c879787ea6366a8
  • Pointer size: 131 Bytes
  • Size of remote file: 769 kB
models/people_Amelia_Vega_distil_400/images/Retain - An image of a child.png ADDED

Git LFS Details

  • SHA256: f79dab74d9ff4498ba05e69a191d74a503dd166ed93897de4d794d37c7b8abc8
  • Pointer size: 131 Bytes
  • Size of remote file: 558 kB
models/people_Amelia_Vega_distil_400/images/Retain - An picture of a child in the rain.png ADDED

Git LFS Details

  • SHA256: 249ecbeb0a8df1a7790240f934693742d1bd6f61ef73b6abecc2a6476ae94fcc
  • Pointer size: 131 Bytes
  • Size of remote file: 704 kB
models/people_Amelia_Vega_distil_400/images/Retain - An picture of a child running.png ADDED

Git LFS Details

  • SHA256: 226168feecab164d5b47a203dcad7239c8a9561e0f61e10ca07308ab11527494
  • Pointer size: 131 Bytes
  • Size of remote file: 781 kB
models/people_Amelia_Vega_distil_400/images/Retain - Photograph of a child; high definition.png ADDED

Git LFS Details

  • SHA256: 34db7ffdd26de4e98c04f9a221dfe59693143e05927a83424d3e35670d14d4bb
  • Pointer size: 131 Bytes
  • Size of remote file: 569 kB
models/people_Amelia_Vega_distil_400/images/tst_prompt_399_01.png ADDED

Git LFS Details

  • SHA256: 4d518a03817adeea80978e1121e2be21c077cca518645e30df137e8171d6f3f3
  • Pointer size: 131 Bytes
  • Size of remote file: 418 kB
models/people_Amelia_Vega_distil_400/images/val_prompt_00_01.png ADDED

Git LFS Details

  • SHA256: 005b64036920a2ad342d55b80ddeb104c086425976382724aaf16bef41f35e7c
  • Pointer size: 131 Bytes
  • Size of remote file: 459 kB
models/people_Amelia_Vega_distil_400/logs/text2image-fine-tune/1770487516.7983766/events.out.tfevents.1770487516.erdos.cl.itk.ppke.hu.3106877.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73117bc0039826275ca4b5ea40484f4e22081ab4b866afcc0039bce0cc0d0b85
3
+ size 2925
models/people_Amelia_Vega_distil_400/logs/text2image-fine-tune/1770487516.8018496/hparams.yml ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adam_beta1: 0.9
2
+ adam_beta2: 0.999
3
+ adam_epsilon: 1.0e-08
4
+ adam_weight_decay: 0.01
5
+ allow_tf32: false
6
+ cache_dir: null
7
+ caption_column: text
8
+ center_crop: false
9
+ checkpointing_steps: 10000
10
+ checkpoints_total_limit: null
11
+ compute_gradient_conflict: false
12
+ compute_runtimes: true
13
+ dataloader_num_workers: 2
14
+ dataset_forget_config_name: null
15
+ dataset_forget_name: assets/datasets/lfw_splits_filtered/Amelia_Vega/train_forget
16
+ dataset_retain_config_name: null
17
+ dataset_retain_name: assets/datasets/lfw_splits_filtered/Amelia_Vega/train_retain
18
+ device: cuda
19
+ enable_xformers_memory_efficient_attention: false
20
+ gradient_accumulation_steps: 2
21
+ gradient_checkpointing: false
22
+ hub_model_id: null
23
+ hub_token: null
24
+ image_column: image
25
+ is_lora_negated: false
26
+ learning_rate: 0.0006
27
+ local_rank: -1
28
+ logging_dir: logs
29
+ logging_steps: 20
30
+ lora_alpha: 4
31
+ lora_r: 16
32
+ lr_scheduler_type: constant
33
+ lr_warmup_steps: 0
34
+ max_grad_norm: 5.0
35
+ max_train_samples: null
36
+ max_train_steps: 800
37
+ mixed_precision: 'no'
38
+ model_name_or_path: CompVis/stable-diffusion-v1-4
39
+ n_gpu: 1
40
+ noise_offset: 0.0
41
+ num_train_epochs: 400
42
+ num_validation_images: 1
43
+ output_dir: assets/models/people_Amelia_Vega_distil_400
44
+ overwritting_concept: a child
45
+ per_device_train_batch_size: 2
46
+ prediction_type: null
47
+ random_flip: true
48
+ report_to: tensorboard
49
+ resolution: 512
50
+ resume_from_checkpoint: null
51
+ revision: null
52
+ save_strategy: epoch
53
+ save_total_limit: 2
54
+ seed: 42
55
+ should_log: true
56
+ use_8bit_adam: false
57
+ validation_epochs: 401
58
+ validation_prompt: An image of Amelia Vega
59
+ variant: null
models/people_Amelia_Vega_distil_400/logs/text2image-fine-tune/events.out.tfevents.1770487516.erdos.cl.itk.ppke.hu.3106877.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c40aaefffe114a8acc32dbf7ea0de28a08fad9f5a0c20d64391a32714d30a32f
3
+ size 966722
models/people_Amelia_Vega_distil_400/pytorch_lora_weights.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d68433ff27dedbf1220115153df3224bfa194ba29c0a8b05a57c6cfb2dbd3dcc
3
+ size 12792952