LeonardoBenitez commited on
Commit
c089193
·
verified ·
1 Parent(s): 18bd7f4

Upload folder using huggingface_hub

Browse files
Files changed (18) hide show
  1. models/people_Bob_Hope_munba_200/README.md +209 -0
  2. models/people_Bob_Hope_munba_200/images/Forget - An image of Bob Hope.png +3 -0
  3. models/people_Bob_Hope_munba_200/images/Forget - An picture of Bob Hope in the rain.png +3 -0
  4. models/people_Bob_Hope_munba_200/images/Forget - An picture of Bob Hope running.png +3 -0
  5. models/people_Bob_Hope_munba_200/images/Forget - Photograph of Bob Hope; high definition.png +3 -0
  6. models/people_Bob_Hope_munba_200/images/Retain - An image of a child.png +3 -0
  7. models/people_Bob_Hope_munba_200/images/Retain - An picture of a child in the rain.png +3 -0
  8. models/people_Bob_Hope_munba_200/images/Retain - An picture of a child running.png +3 -0
  9. models/people_Bob_Hope_munba_200/images/Retain - Photograph of a child; high definition.png +3 -0
  10. models/people_Bob_Hope_munba_200/images/tst_prompt_199_01.png +3 -0
  11. models/people_Bob_Hope_munba_200/images/val_prompt_00_01.png +3 -0
  12. models/people_Bob_Hope_munba_200/logs/text2image-fine-tune/1771581133.078071/events.out.tfevents.1771581133.erdos.cl.itk.ppke.hu.3619980.5 +3 -0
  13. models/people_Bob_Hope_munba_200/logs/text2image-fine-tune/1771581133.0817492/hparams.yml +59 -0
  14. models/people_Bob_Hope_munba_200/logs/text2image-fine-tune/1771581245.3836448/events.out.tfevents.1771581245.erdos.cl.itk.ppke.hu.3633013.1 +3 -0
  15. models/people_Bob_Hope_munba_200/logs/text2image-fine-tune/1771581245.4912925/hparams.yml +59 -0
  16. models/people_Bob_Hope_munba_200/logs/text2image-fine-tune/events.out.tfevents.1771581133.erdos.cl.itk.ppke.hu.3619980.4 +3 -0
  17. models/people_Bob_Hope_munba_200/logs/text2image-fine-tune/events.out.tfevents.1771581245.erdos.cl.itk.ppke.hu.3633013.0 +3 -0
  18. models/people_Bob_Hope_munba_200/pytorch_lora_weights.safetensors +3 -0
models/people_Bob_Hope_munba_200/README.md ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ hyperparameters:
3
+ lora_r: 16
4
+ lora_alpha: 4
5
+ is_lora_negated: true
6
+ seed: 42
7
+ model_name_or_path: CompVis/stable-diffusion-v1-4
8
+ revision: null
9
+ variant: null
10
+ dataset_forget_name: assets/datasets/lfw_splits_filtered/Bob_Hope/train_forget
11
+ dataset_retain_name: assets/datasets/lfw_splits_filtered/Bob_Hope/train_retain
12
+ dataset_forget_config_name: null
13
+ dataset_retain_config_name: null
14
+ image_column: image
15
+ caption_column: text
16
+ validation_prompt: An image of Bob Hope
17
+ num_validation_images: 1
18
+ validation_epochs: 201
19
+ resolution: 512
20
+ center_crop: false
21
+ random_flip: true
22
+ max_train_samples: null
23
+ dataloader_num_workers: 2
24
+ prediction_type: null
25
+ per_device_train_batch_size: 2
26
+ gradient_accumulation_steps: 2
27
+ num_train_epochs: 200
28
+ learning_rate: 0.0006
29
+ lr_scheduler_type: constant
30
+ should_log: true
31
+ local_rank: -1
32
+ device: cuda
33
+ n_gpu: 1
34
+ output_dir: assets/models/people_Bob_Hope_munba_200
35
+ cache_dir: null
36
+ hub_token: null
37
+ hub_model_id: null
38
+ logging_dir: logs
39
+ logging_steps: 20
40
+ save_strategy: epoch
41
+ save_total_limit: 2
42
+ gradient_checkpointing: false
43
+ enable_xformers_memory_efficient_attention: false
44
+ mixed_precision: 'no'
45
+ allow_tf32: false
46
+ use_8bit_adam: false
47
+ report_to: tensorboard
48
+ compute_gradient_conflict: false
49
+ compute_runtimes: true
50
+ compute_memory: true
51
+ max_train_steps: 400
52
+ lr_warmup_steps: 0
53
+ adam_beta1: 0.9
54
+ adam_beta2: 0.999
55
+ adam_weight_decay: 0.01
56
+ adam_epsilon: 1.0e-08
57
+ max_grad_norm: 5.0
58
+ checkpointing_steps: 10000
59
+ checkpoints_total_limit: null
60
+ resume_from_checkpoint: null
61
+ noise_offset: 0.0
62
+ model-index:
63
+ - name: None
64
+ results:
65
+ - task:
66
+ type: text-to-image
67
+ dataset:
68
+ name: Forget set
69
+ type: inline-prompts
70
+ metrics:
71
+ - type: clip
72
+ value: 37.4520149230957
73
+ name: ForgetSet clip score of original model mean (~↑)
74
+ - type: clip
75
+ value: 2.0002123686707654
76
+ name: ForgetSet clip score of original model std (~↓)
77
+ - type: clip
78
+ value: 22.566225051879883
79
+ name: ForgetSet clip score of learned model mean (~↑)
80
+ - type: clip
81
+ value: 0.9436161392607103
82
+ name: ForgetSet clip score of learned model std (~↓)
83
+ - type: clip
84
+ value: 22.895905017852783
85
+ name: ForgetSet clip score of unlearned model mean (↓)
86
+ - type: clip
87
+ value: 0.793959292329927
88
+ name: ForgetSet clip score of unlearned model std (~↓)
89
+ - type: clip
90
+ value: -0.3296799659729004
91
+ name: ForgetSet clip score difference between learned and unlearned mean (↑)
92
+ - type: clip
93
+ value: 0.7021400860429567
94
+ name: ForgetSet clip score difference between learned and unlearned std (~↓)
95
+ - type: clip
96
+ value: 14.55610990524292
97
+ name: ForgetSet clip score difference between original and unlearned mean (↑)
98
+ - type: clip
99
+ value: 2.4215973786452825
100
+ name: ForgetSet clip score difference between original and unlearned std (~↓)
101
+ - type: clip
102
+ value: 14.88578987121582
103
+ name: ForgetSet clip score difference between original and learned mean (↓)
104
+ - type: clip
105
+ value: 2.7580758149312237
106
+ name: ForgetSet clip score difference between original and learned std (~↓)
107
+ - type: clip
108
+ value: 30.71055316925049
109
+ name: RetainSet clip score of original model mean (~↑)
110
+ - type: clip
111
+ value: 1.864312610848831
112
+ name: RetainSet clip score of original model std (~↓)
113
+ - type: clip
114
+ value: 24.264270305633545
115
+ name: RetainSet clip score of learned model mean (~↓)
116
+ - type: clip
117
+ value: 0.44723445990341937
118
+ name: RetainSet clip score of learned model std (~↓)
119
+ - type: clip
120
+ value: 23.95001745223999
121
+ name: RetainSet clip score of unlearned model mean (↑)
122
+ - type: clip
123
+ value: 0.8432963003091453
124
+ name: RetainSet clip score of unlearned model std (~↓)
125
+ - type: clip
126
+ value: 0.3142528533935547
127
+ name: RetainSet clip score difference between learned and unlearned mean (↓)
128
+ - type: clip
129
+ value: 0.5125290207611559
130
+ name: RetainSet clip score difference between learned and unlearned std (~↓)
131
+ - type: clip
132
+ value: 6.760535717010498
133
+ name: RetainSet clip score difference between original and unlearned mean (↓)
134
+ - type: clip
135
+ value: 2.451657311156145
136
+ name: RetainSet clip score difference between original and unlearned std (~↓)
137
+ - type: clip
138
+ value: 6.446282863616943
139
+ name: RetainSet clip score difference between original and learned mean (↑)
140
+ - type: clip
141
+ value: 2.2797904582089465
142
+ name: RetainSet clip score difference between original and learned std (~↓)
143
+ - type: runtime
144
+ value: 7.081277370452881
145
+ name: Inference latency seconds mean (↓)
146
+ - type: runtime
147
+ value: 0.11060397618626638
148
+ name: Inference latency seconds std (~↓)
149
+ - task:
150
+ type: text-to-image
151
+ dataset:
152
+ name: assets/datasets/lfw_splits_filtered/Bob_Hope/train_forget (forget) and
153
+ assets/datasets/lfw_splits_filtered/Bob_Hope/train_retain (retain) sets
154
+ type: forget-and-retain-together
155
+ metrics:
156
+ - type: runtime
157
+ value: 3.1926398277282715
158
+ name: Runtime init seconds (~↓)
159
+ - type: runtime
160
+ value: 30.240179300308228
161
+ name: Runtime data loading seconds (~↓)
162
+ - type: runtime
163
+ value: 1418.480504989624
164
+ name: Runtime training seconds (↓)
165
+ - type: runtime
166
+ value: 175.80327033996582
167
+ name: Runtime eval seconds (~↓)
168
+ - type: memory
169
+ value: 0
170
+ name: Peak memory usage in training (~↓)
171
+ ---
172
+
173
+ <!-- This model card has been generated automatically according to the information the training script had access to. You
174
+ should probably proofread and complete it, then remove this comment. -->
175
+
176
+
177
+ # LoRA text2image fine-tuning - None
178
+ These are LoRA adaptation weights for CompVis/stable-diffusion-v1-4.
179
+ The weights were fine-tuned to forget the assets/datasets/lfw_splits_filtered/Bob_Hope/train_forget dataset while retaining assets/datasets/lfw_splits_filtered/Bob_Hope/train_retain.
180
+ You can find some example images below.
181
+
182
+ ![img](images/val_prompt_00_01.png)
183
+ ![img](images/tst_prompt_199_01.png)
184
+ ![img](images/Forget - An image of Bob Hope.png)
185
+ ![img](images/Forget - Photograph of Bob Hope; high definition.png)
186
+ ![img](images/Forget - An picture of Bob Hope in the rain.png)
187
+ ![img](images/Forget - An picture of Bob Hope running.png)
188
+ ![img](images/Retain - An image of a child.png)
189
+ ![img](images/Retain - Photograph of a child; high definition.png)
190
+ ![img](images/Retain - An picture of a child in the rain.png)
191
+ ![img](images/Retain - An picture of a child running.png)
192
+
193
+
194
+
195
+ ## Intended uses & limitations
196
+
197
+ #### How to use
198
+
199
+ ```python
200
+ # TODO: add an example code snippet for running this diffusion pipeline
201
+ ```
202
+
203
+ #### Limitations and bias
204
+
205
+ [TODO: provide examples of latent issues and potential remediations]
206
+
207
+ ## Training details
208
+
209
+ [TODO: describe the data used to train the model]
models/people_Bob_Hope_munba_200/images/Forget - An image of Bob Hope.png ADDED

Git LFS Details

  • SHA256: 8d4745d666113eb6f018622610fe43fcd8fefbac5af5edcc097b21d46183e892
  • Pointer size: 131 Bytes
  • Size of remote file: 768 kB
models/people_Bob_Hope_munba_200/images/Forget - An picture of Bob Hope in the rain.png ADDED

Git LFS Details

  • SHA256: 793ab81ee4e1d813ebf4856e601af257854b1b34fce9d666569bb3b515283fb6
  • Pointer size: 131 Bytes
  • Size of remote file: 818 kB
models/people_Bob_Hope_munba_200/images/Forget - An picture of Bob Hope running.png ADDED

Git LFS Details

  • SHA256: 723a1f94d4f2c2d5cd4508ddcfaa5109cafaec08c023c85cb7c8a91272271db5
  • Pointer size: 131 Bytes
  • Size of remote file: 791 kB
models/people_Bob_Hope_munba_200/images/Forget - Photograph of Bob Hope; high definition.png ADDED

Git LFS Details

  • SHA256: 2a1f4158839fd2e66672fed0d83e95b85144d141d4e27d9cbe31f5eefe410375
  • Pointer size: 131 Bytes
  • Size of remote file: 748 kB
models/people_Bob_Hope_munba_200/images/Retain - An image of a child.png ADDED

Git LFS Details

  • SHA256: 8a34e37270eb6754e59ca4d698bd62194ae53f9a6ed70408471dec08bacede1a
  • Pointer size: 131 Bytes
  • Size of remote file: 774 kB
models/people_Bob_Hope_munba_200/images/Retain - An picture of a child in the rain.png ADDED

Git LFS Details

  • SHA256: de7f0b7438baf1b3d9f70928b744650d5de10a4d020688019c54c2cb3c0df759
  • Pointer size: 131 Bytes
  • Size of remote file: 811 kB
models/people_Bob_Hope_munba_200/images/Retain - An picture of a child running.png ADDED

Git LFS Details

  • SHA256: cb98f8a4de91f35fefcc9fc191ba2d2739dddeefc559990c125da8ff7b077b58
  • Pointer size: 131 Bytes
  • Size of remote file: 822 kB
models/people_Bob_Hope_munba_200/images/Retain - Photograph of a child; high definition.png ADDED

Git LFS Details

  • SHA256: ed734fb02bf98aa373cbe4d6c4ece5721772229480f8a0b91bc91d283751dabc
  • Pointer size: 131 Bytes
  • Size of remote file: 854 kB
models/people_Bob_Hope_munba_200/images/tst_prompt_199_01.png ADDED

Git LFS Details

  • SHA256: e9c4a470ad900801f7de4f9402eb27af8a1cc00eac80d618ef16bac39fb27d33
  • Pointer size: 128 Bytes
  • Size of remote file: 842 Bytes
models/people_Bob_Hope_munba_200/images/val_prompt_00_01.png ADDED

Git LFS Details

  • SHA256: cadc9f0ad12922bc042c29b3bc483c7eb45f1cecb71d0e01387172a973da1e8a
  • Pointer size: 131 Bytes
  • Size of remote file: 419 kB
models/people_Bob_Hope_munba_200/logs/text2image-fine-tune/1771581133.078071/events.out.tfevents.1771581133.erdos.cl.itk.ppke.hu.3619980.5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ae27b58f0bc42195fc1cb8ab3f30c6c5b2db71190cc509bd57960c9d1e040b0
3
+ size 2900
models/people_Bob_Hope_munba_200/logs/text2image-fine-tune/1771581133.0817492/hparams.yml ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adam_beta1: 0.9
2
+ adam_beta2: 0.999
3
+ adam_epsilon: 1.0e-08
4
+ adam_weight_decay: 0.01
5
+ allow_tf32: false
6
+ cache_dir: null
7
+ caption_column: text
8
+ center_crop: false
9
+ checkpointing_steps: 10000
10
+ checkpoints_total_limit: null
11
+ compute_gradient_conflict: false
12
+ compute_memory: true
13
+ compute_runtimes: true
14
+ dataloader_num_workers: 2
15
+ dataset_forget_config_name: null
16
+ dataset_forget_name: assets/datasets/lfw_splits_filtered/Bob_Hope/train_forget
17
+ dataset_retain_config_name: null
18
+ dataset_retain_name: assets/datasets/lfw_splits_filtered/Bob_Hope/train_retain
19
+ device: cuda
20
+ enable_xformers_memory_efficient_attention: false
21
+ gradient_accumulation_steps: 2
22
+ gradient_checkpointing: false
23
+ hub_model_id: null
24
+ hub_token: null
25
+ image_column: image
26
+ is_lora_negated: true
27
+ learning_rate: 0.0006
28
+ local_rank: -1
29
+ logging_dir: logs
30
+ logging_steps: 20
31
+ lora_alpha: 4
32
+ lora_r: 16
33
+ lr_scheduler_type: constant
34
+ lr_warmup_steps: 0
35
+ max_grad_norm: 5.0
36
+ max_train_samples: null
37
+ max_train_steps: 400
38
+ mixed_precision: 'no'
39
+ model_name_or_path: CompVis/stable-diffusion-v1-4
40
+ n_gpu: 1
41
+ noise_offset: 0.0
42
+ num_train_epochs: 200
43
+ num_validation_images: 1
44
+ output_dir: assets/models/people_Bob_Hope_munba_200
45
+ per_device_train_batch_size: 2
46
+ prediction_type: null
47
+ random_flip: true
48
+ report_to: tensorboard
49
+ resolution: 512
50
+ resume_from_checkpoint: null
51
+ revision: null
52
+ save_strategy: epoch
53
+ save_total_limit: 2
54
+ seed: 42
55
+ should_log: true
56
+ use_8bit_adam: false
57
+ validation_epochs: 201
58
+ validation_prompt: An image of Bob Hope
59
+ variant: null
models/people_Bob_Hope_munba_200/logs/text2image-fine-tune/1771581245.3836448/events.out.tfevents.1771581245.erdos.cl.itk.ppke.hu.3633013.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:43b8cb1eb77060444caaa25c8cd7fcf882b4026219a29634c5f0796c92558140
3
+ size 2900
models/people_Bob_Hope_munba_200/logs/text2image-fine-tune/1771581245.4912925/hparams.yml ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adam_beta1: 0.9
2
+ adam_beta2: 0.999
3
+ adam_epsilon: 1.0e-08
4
+ adam_weight_decay: 0.01
5
+ allow_tf32: false
6
+ cache_dir: null
7
+ caption_column: text
8
+ center_crop: false
9
+ checkpointing_steps: 10000
10
+ checkpoints_total_limit: null
11
+ compute_gradient_conflict: false
12
+ compute_memory: true
13
+ compute_runtimes: true
14
+ dataloader_num_workers: 2
15
+ dataset_forget_config_name: null
16
+ dataset_forget_name: assets/datasets/lfw_splits_filtered/Bob_Hope/train_forget
17
+ dataset_retain_config_name: null
18
+ dataset_retain_name: assets/datasets/lfw_splits_filtered/Bob_Hope/train_retain
19
+ device: cuda
20
+ enable_xformers_memory_efficient_attention: false
21
+ gradient_accumulation_steps: 2
22
+ gradient_checkpointing: false
23
+ hub_model_id: null
24
+ hub_token: null
25
+ image_column: image
26
+ is_lora_negated: true
27
+ learning_rate: 0.0006
28
+ local_rank: -1
29
+ logging_dir: logs
30
+ logging_steps: 20
31
+ lora_alpha: 4
32
+ lora_r: 16
33
+ lr_scheduler_type: constant
34
+ lr_warmup_steps: 0
35
+ max_grad_norm: 5.0
36
+ max_train_samples: null
37
+ max_train_steps: 400
38
+ mixed_precision: 'no'
39
+ model_name_or_path: CompVis/stable-diffusion-v1-4
40
+ n_gpu: 1
41
+ noise_offset: 0.0
42
+ num_train_epochs: 200
43
+ num_validation_images: 1
44
+ output_dir: assets/models/people_Bob_Hope_munba_200
45
+ per_device_train_batch_size: 2
46
+ prediction_type: null
47
+ random_flip: true
48
+ report_to: tensorboard
49
+ resolution: 512
50
+ resume_from_checkpoint: null
51
+ revision: null
52
+ save_strategy: epoch
53
+ save_total_limit: 2
54
+ seed: 42
55
+ should_log: true
56
+ use_8bit_adam: false
57
+ validation_epochs: 201
58
+ validation_prompt: An image of Bob Hope
59
+ variant: null
models/people_Bob_Hope_munba_200/logs/text2image-fine-tune/events.out.tfevents.1771581133.erdos.cl.itk.ppke.hu.3619980.4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:608d11cb0da9a3e73410eca5fbb097575f2c80e068d8f69732cab890eedf1d93
3
+ size 88
models/people_Bob_Hope_munba_200/logs/text2image-fine-tune/events.out.tfevents.1771581245.erdos.cl.itk.ppke.hu.3633013.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1eb32e4555abcaf8b3e1f5d1179cdc7cde91fda26a9c1d7700d9db43e3a52365
3
+ size 464728
models/people_Bob_Hope_munba_200/pytorch_lora_weights.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6118394550b2b4060b11a7b43e838379218db6882e89276174e1aab176bc1512
3
+ size 12792952