LeonardoBenitez commited on
Commit
d4486f6
·
verified ·
1 Parent(s): b5c698c

Upload folder using huggingface_hub

Browse files
models/people_Bob_Hope_distil_400/README.md ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ hyperparameters:
3
+ lora_r: 16
4
+ lora_alpha: 4
5
+ is_lora_negated: false
6
+ seed: 42
7
+ model_name_or_path: CompVis/stable-diffusion-v1-4
8
+ revision: null
9
+ variant: null
10
+ dataset_forget_name: assets/datasets/lfw_splits_filtered/Bob_Hope/train_forget
11
+ dataset_retain_name: assets/datasets/lfw_splits_filtered/Bob_Hope/train_retain
12
+ dataset_forget_config_name: null
13
+ dataset_retain_config_name: null
14
+ image_column: image
15
+ caption_column: text
16
+ validation_prompt: An image of Bob Hope
17
+ num_validation_images: 1
18
+ validation_epochs: 401
19
+ resolution: 512
20
+ center_crop: false
21
+ random_flip: true
22
+ max_train_samples: null
23
+ dataloader_num_workers: 2
24
+ prediction_type: null
25
+ per_device_train_batch_size: 2
26
+ gradient_accumulation_steps: 2
27
+ num_train_epochs: 400
28
+ learning_rate: 0.0006
29
+ lr_scheduler_type: constant
30
+ should_log: true
31
+ local_rank: -1
32
+ device: cuda
33
+ n_gpu: 1
34
+ output_dir: assets/models/people_Bob_Hope_distil_400
35
+ cache_dir: null
36
+ hub_token: null
37
+ hub_model_id: null
38
+ logging_dir: logs
39
+ logging_steps: 20
40
+ save_strategy: epoch
41
+ save_total_limit: 2
42
+ gradient_checkpointing: false
43
+ enable_xformers_memory_efficient_attention: false
44
+ mixed_precision: 'no'
45
+ allow_tf32: false
46
+ use_8bit_adam: false
47
+ report_to: tensorboard
48
+ compute_gradient_conflict: false
49
+ compute_runtimes: true
50
+ max_train_steps: 800
51
+ lr_warmup_steps: 0
52
+ adam_beta1: 0.9
53
+ adam_beta2: 0.999
54
+ adam_weight_decay: 0.01
55
+ adam_epsilon: 1.0e-08
56
+ max_grad_norm: 5.0
57
+ checkpointing_steps: 10000
58
+ checkpoints_total_limit: null
59
+ resume_from_checkpoint: null
60
+ noise_offset: 0.0
61
+ overwritting_concept: a child
62
+ model-index:
63
+ - name: None
64
+ results:
65
+ - task:
66
+ type: text-to-image
67
+ dataset:
68
+ name: Forget set
69
+ type: inline-prompts
70
+ metrics:
71
+ - type: clip
72
+ value: 37.543532371520996
73
+ name: ForgetSet clip score of original model mean (~↑)
74
+ - type: clip
75
+ value: 0.2988977518697745
76
+ name: ForgetSet clip score of original model std (~↓)
77
+ - type: clip
78
+ value: 24.061158657073975
79
+ name: ForgetSet clip score of learned model mean (~↑)
80
+ - type: clip
81
+ value: 1.7380691511745432
82
+ name: ForgetSet clip score of learned model std (~↓)
83
+ - type: clip
84
+ value: 23.018229484558105
85
+ name: ForgetSet clip score of unlearned model mean (↓)
86
+ - type: clip
87
+ value: 4.280632487724009
88
+ name: ForgetSet clip score of unlearned model std (~↓)
89
+ - type: clip
90
+ value: 1.0429291725158691
91
+ name: ForgetSet clip score difference between learned and unlearned mean (↑)
92
+ - type: clip
93
+ value: 3.3863711645628713
94
+ name: ForgetSet clip score difference between learned and unlearned std (~↓)
95
+ - type: clip
96
+ value: 14.52530288696289
97
+ name: ForgetSet clip score difference between original and unlearned mean (↑)
98
+ - type: clip
99
+ value: 4.311621309873136
100
+ name: ForgetSet clip score difference between original and unlearned std (~↓)
101
+ - type: clip
102
+ value: 13.482373714447021
103
+ name: ForgetSet clip score difference between original and learned mean (↓)
104
+ - type: clip
105
+ value: 1.6400484379377902
106
+ name: ForgetSet clip score difference between original and learned std (~↓)
107
+ - type: clip
108
+ value: 31.09067440032959
109
+ name: RetainSet clip score of original model mean (~↑)
110
+ - type: clip
111
+ value: 2.3280465098326126
112
+ name: RetainSet clip score of original model std (~↓)
113
+ - type: clip
114
+ value: 31.033051013946533
115
+ name: RetainSet clip score of learned model mean (~↓)
116
+ - type: clip
117
+ value: 2.342254875834053
118
+ name: RetainSet clip score of learned model std (~↓)
119
+ - type: clip
120
+ value: 30.780762195587158
121
+ name: RetainSet clip score of unlearned model mean (↑)
122
+ - type: clip
123
+ value: 1.828176986427414
124
+ name: RetainSet clip score of unlearned model std (~↓)
125
+ - type: clip
126
+ value: 0.252288818359375
127
+ name: RetainSet clip score difference between learned and unlearned mean (↓)
128
+ - type: clip
129
+ value: 1.792190680276755
130
+ name: RetainSet clip score difference between learned and unlearned std (~↓)
131
+ - type: clip
132
+ value: 0.30991220474243164
133
+ name: RetainSet clip score difference between original and unlearned mean (↓)
134
+ - type: clip
135
+ value: 1.077879607023119
136
+ name: RetainSet clip score difference between original and unlearned std (~↓)
137
+ - type: clip
138
+ value: 0.05762338638305664
139
+ name: RetainSet clip score difference between original and learned mean (↑)
140
+ - type: clip
141
+ value: 2.8251085663009956
142
+ name: RetainSet clip score difference between original and learned std (~↓)
143
+ - type: runtime
144
+ value: 6.880054493745168
145
+ name: Inference latency seconds mean (↓)
146
+ - type: runtime
147
+ value: 0.028510428691405985
148
+ name: Inference latency seconds std (~↓)
149
+ - task:
150
+ type: text-to-image
151
+ dataset:
152
+ name: assets/datasets/lfw_splits_filtered/Bob_Hope/train_forget (forget) and
153
+ assets/datasets/lfw_splits_filtered/Bob_Hope/train_retain (retain) sets
154
+ type: forget-and-retain-together
155
+ metrics:
156
+ - type: runtime
157
+ value: 3.63006854057312
158
+ name: Runtime init seconds (~↓)
159
+ - type: runtime
160
+ value: 1.102853536605835
161
+ name: Runtime data loading seconds (~↓)
162
+ - type: runtime
163
+ value: 3846.02255654335
164
+ name: Runtime training seconds (↓)
165
+ - type: runtime
166
+ value: 177.79666829109192
167
+ name: Runtime eval seconds (~↓)
168
+ ---
169
+
170
+ <!-- This model card has been generated automatically according to the information the training script had access to. You
171
+ should probably proofread and complete it, then remove this comment. -->
172
+
173
+
174
+ # LoRA text2image fine-tuning - None
175
+ These are LoRA adaptation weights for CompVis/stable-diffusion-v1-4.
176
+ The weights were fine-tuned for forgetting assets/datasets/lfw_splits_filtered/Bob_Hope/train_forget dataset, while retaining assets/datasets/lfw_splits_filtered/Bob_Hope/train_retain.
177
+ You can find some example images in the following.
178
+
179
+ ![img](images/val_prompt_00_01.png)
180
+ ![img](images/tst_prompt_399_01.png)
181
+ ![img](images/Forget - An image of Bob Hope.png)
182
+ ![img](images/Forget - Photograph of Bob Hope; high definition.png)
183
+ ![img](images/Forget - An picture of Bob Hope in the rain.png)
184
+ ![img](images/Forget - An picture of Bob Hope running.png)
185
+ ![img](images/Retain - An image of a child.png)
186
+ ![img](images/Retain - Photograph of a child; high definition.png)
187
+ ![img](images/Retain - An picture of a child in the rain.png)
188
+ ![img](images/Retain - An picture of a child running.png)
189
+
190
+
191
+
192
+ ## Intended uses & limitations
193
+
194
+ #### How to use
195
+
196
+ ```python
197
+ # TODO: add an example code snippet for running this diffusion pipeline
198
+ ```
199
+
200
+ #### Limitations and bias
201
+
202
+ [TODO: provide examples of latent issues and potential remediations]
203
+
204
+ ## Training details
205
+
206
+ [TODO: describe the data used to train the model]
models/people_Bob_Hope_distil_400/images/Forget - An image of Bob Hope.png ADDED

Git LFS Details

  • SHA256: ff6f8cb05daff31409316e8ac09b8dab318f6e8fb701323588708c4cc9f3a04c
  • Pointer size: 131 Bytes
  • Size of remote file: 624 kB
models/people_Bob_Hope_distil_400/images/Forget - An picture of Bob Hope in the rain.png ADDED

Git LFS Details

  • SHA256: d01d50a18230142aa718bc0f8aa5d571f27880993175382e91f18dce193c878b
  • Pointer size: 131 Bytes
  • Size of remote file: 869 kB
models/people_Bob_Hope_distil_400/images/Forget - An picture of Bob Hope running.png ADDED

Git LFS Details

  • SHA256: fb6b4af1bb62a57b4d9cd4c20c6fff1e6774b478bf66debb827c8b726c2b957f
  • Pointer size: 131 Bytes
  • Size of remote file: 720 kB
models/people_Bob_Hope_distil_400/images/Forget - Photograph of Bob Hope; high definition.png ADDED

Git LFS Details

  • SHA256: 81795e13f44f88ff17e83791c0661694c3dc4dce6d3128642b60f510488a99d8
  • Pointer size: 131 Bytes
  • Size of remote file: 728 kB
models/people_Bob_Hope_distil_400/images/Retain - An image of a child.png ADDED

Git LFS Details

  • SHA256: 6cca4ab34879b8712089dc4607ba4aad13c794508ea9c470f4ce3bc90001dfd2
  • Pointer size: 131 Bytes
  • Size of remote file: 557 kB
models/people_Bob_Hope_distil_400/images/Retain - An picture of a child in the rain.png ADDED

Git LFS Details

  • SHA256: c8bad3a8c3eeed0e3118dcc09084ba69a3b59427defb1de6071fa1cdc6354456
  • Pointer size: 131 Bytes
  • Size of remote file: 686 kB
models/people_Bob_Hope_distil_400/images/Retain - An picture of a child running.png ADDED

Git LFS Details

  • SHA256: 2db32e713b88234b664a8d21ad59cd819942a685e0fda03a02b12421a6455118
  • Pointer size: 131 Bytes
  • Size of remote file: 818 kB
models/people_Bob_Hope_distil_400/images/Retain - Photograph of a child; high definition.png ADDED

Git LFS Details

  • SHA256: 7169f52ac2282b5e152866752ff29099d15288a2d6127998a9bc351c1f516b9d
  • Pointer size: 131 Bytes
  • Size of remote file: 579 kB
models/people_Bob_Hope_distil_400/images/tst_prompt_399_01.png ADDED

Git LFS Details

  • SHA256: 77de77b2d1b2cbed4316f6691154b5266ff67436ab43459218b5f31a84387668
  • Pointer size: 131 Bytes
  • Size of remote file: 486 kB
models/people_Bob_Hope_distil_400/images/val_prompt_00_01.png ADDED

Git LFS Details

  • SHA256: e3370ad5f379ea7ede1d5f106764a02191207bfd71729e868977b78bbf8664fb
  • Pointer size: 131 Bytes
  • Size of remote file: 423 kB
models/people_Bob_Hope_distil_400/logs/text2image-fine-tune/1770468255.1293645/events.out.tfevents.1770468255.erdos.cl.itk.ppke.hu.3080550.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36c0b471ace1f157da391d94ea7d2418424210a88b3c7873d413234385234d4e
3
+ size 2913
models/people_Bob_Hope_distil_400/logs/text2image-fine-tune/1770468255.1327958/hparams.yml ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adam_beta1: 0.9
2
+ adam_beta2: 0.999
3
+ adam_epsilon: 1.0e-08
4
+ adam_weight_decay: 0.01
5
+ allow_tf32: false
6
+ cache_dir: null
7
+ caption_column: text
8
+ center_crop: false
9
+ checkpointing_steps: 10000
10
+ checkpoints_total_limit: null
11
+ compute_gradient_conflict: false
12
+ compute_runtimes: true
13
+ dataloader_num_workers: 2
14
+ dataset_forget_config_name: null
15
+ dataset_forget_name: assets/datasets/lfw_splits_filtered/Bob_Hope/train_forget
16
+ dataset_retain_config_name: null
17
+ dataset_retain_name: assets/datasets/lfw_splits_filtered/Bob_Hope/train_retain
18
+ device: cuda
19
+ enable_xformers_memory_efficient_attention: false
20
+ gradient_accumulation_steps: 2
21
+ gradient_checkpointing: false
22
+ hub_model_id: null
23
+ hub_token: null
24
+ image_column: image
25
+ is_lora_negated: false
26
+ learning_rate: 0.0006
27
+ local_rank: -1
28
+ logging_dir: logs
29
+ logging_steps: 20
30
+ lora_alpha: 4
31
+ lora_r: 16
32
+ lr_scheduler_type: constant
33
+ lr_warmup_steps: 0
34
+ max_grad_norm: 5.0
35
+ max_train_samples: null
36
+ max_train_steps: 800
37
+ mixed_precision: 'no'
38
+ model_name_or_path: CompVis/stable-diffusion-v1-4
39
+ n_gpu: 1
40
+ noise_offset: 0.0
41
+ num_train_epochs: 400
42
+ num_validation_images: 1
43
+ output_dir: assets/models/people_Bob_Hope_distil_400
44
+ overwritting_concept: a child
45
+ per_device_train_batch_size: 2
46
+ prediction_type: null
47
+ random_flip: true
48
+ report_to: tensorboard
49
+ resolution: 512
50
+ resume_from_checkpoint: null
51
+ revision: null
52
+ save_strategy: epoch
53
+ save_total_limit: 2
54
+ seed: 42
55
+ should_log: true
56
+ use_8bit_adam: false
57
+ validation_epochs: 401
58
+ validation_prompt: An image of Bob Hope
59
+ variant: null
models/people_Bob_Hope_distil_400/logs/text2image-fine-tune/events.out.tfevents.1770468255.erdos.cl.itk.ppke.hu.3080550.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:974491b706edbbf9dc1d593fe0ecd9ab49243f01bc0992d57e43be4e0ebacef6
3
+ size 999018
models/people_Bob_Hope_distil_400/pytorch_lora_weights.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3f774ed500526c72c0e4d682ff14e228ef16e34b77f6964ddda26dbee2536ae
3
+ size 12792952