LeonardoBenitez commited on
Commit
923d104
·
verified ·
1 Parent(s): 6bd41dd

Upload folder using huggingface_hub

Browse files
models/people_Carlos_Menem_munba_200/README.md ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ hyperparameters:
3
+ lora_r: 16
4
+ lora_alpha: 4
5
+ is_lora_negated: true
6
+ seed: 42
7
+ model_name_or_path: CompVis/stable-diffusion-v1-4
8
+ revision: null
9
+ variant: null
10
+ dataset_forget_name: assets/datasets/lfw_splits_filtered/Carlos_Menem/train_forget
11
+ dataset_retain_name: assets/datasets/lfw_splits_filtered/Carlos_Menem/train_retain
12
+ dataset_forget_config_name: null
13
+ dataset_retain_config_name: null
14
+ image_column: image
15
+ caption_column: text
16
+ validation_prompt: An image of Carlos Menem
17
+ num_validation_images: 1
18
+ validation_epochs: 201
19
+ resolution: 512
20
+ center_crop: false
21
+ random_flip: true
22
+ max_train_samples: null
23
+ dataloader_num_workers: 2
24
+ prediction_type: null
25
+ per_device_train_batch_size: 2
26
+ gradient_accumulation_steps: 2
27
+ num_train_epochs: 200
28
+ learning_rate: 0.0006
29
+ lr_scheduler_type: constant
30
+ should_log: true
31
+ local_rank: -1
32
+ device: cuda
33
+ n_gpu: 1
34
+ output_dir: assets/models/people_Carlos_Menem_munba_200
35
+ cache_dir: null
36
+ hub_token: null
37
+ hub_model_id: null
38
+ logging_dir: logs
39
+ logging_steps: 20
40
+ save_strategy: epoch
41
+ save_total_limit: 2
42
+ gradient_checkpointing: false
43
+ enable_xformers_memory_efficient_attention: false
44
+ mixed_precision: 'no'
45
+ allow_tf32: false
46
+ use_8bit_adam: false
47
+ report_to: tensorboard
48
+ compute_gradient_conflict: false
49
+ compute_runtimes: true
50
+ compute_memory: true
51
+ max_train_steps: 400
52
+ lr_warmup_steps: 0
53
+ adam_beta1: 0.9
54
+ adam_beta2: 0.999
55
+ adam_weight_decay: 0.01
56
+ adam_epsilon: 1.0e-08
57
+ max_grad_norm: 5.0
58
+ checkpointing_steps: 10000
59
+ checkpoints_total_limit: null
60
+ resume_from_checkpoint: null
61
+ noise_offset: 0.0
62
+ model-index:
63
+ - name: None
64
+ results:
65
+ - task:
66
+ type: text-to-image
67
+ dataset:
68
+ name: Forget set
69
+ type: inline-prompts
70
+ metrics:
71
+ - type: clip
72
+ value: 33.64024114608765
73
+ name: ForgetSet clip score of original model mean (~↑)
74
+ - type: clip
75
+ value: 3.1557685733520335
76
+ name: ForgetSet clip score of original model std (~↓)
77
+ - type: clip
78
+ value: 21.1767315864563
79
+ name: ForgetSet clip score of learned model mean (~↑)
80
+ - type: clip
81
+ value: 0.5174281021036993
82
+ name: ForgetSet clip score of learned model std (~↓)
83
+ - type: clip
84
+ value: 24.02331829071045
85
+ name: ForgetSet clip score of unlearned model mean (↓)
86
+ - type: clip
87
+ value: 2.0135561976578567
88
+ name: ForgetSet clip score of unlearned model std (~↓)
89
+ - type: clip
90
+ value: -2.8465867042541504
91
+ name: ForgetSet clip score difference between learned and unlearned mean (↑)
92
+ - type: clip
93
+ value: 2.122567916641444
94
+ name: ForgetSet clip score difference between learned and unlearned std (~↓)
95
+ - type: clip
96
+ value: 9.616922855377197
97
+ name: ForgetSet clip score difference between original and unlearned mean (↑)
98
+ - type: clip
99
+ value: 5.128420930046243
100
+ name: ForgetSet clip score difference between original and unlearned std (~↓)
101
+ - type: clip
102
+ value: 12.463509559631348
103
+ name: ForgetSet clip score difference between original and learned mean (↓)
104
+ - type: clip
105
+ value: 3.0206327665120503
106
+ name: ForgetSet clip score difference between original and learned std (~↓)
107
+ - type: clip
108
+ value: 30.71055316925049
109
+ name: RetainSet clip score of original model mean (~↑)
110
+ - type: clip
111
+ value: 1.864312610848831
112
+ name: RetainSet clip score of original model std (~↓)
113
+ - type: clip
114
+ value: 22.2283353805542
115
+ name: RetainSet clip score of learned model mean (~↓)
116
+ - type: clip
117
+ value: 0.48952612071895446
118
+ name: RetainSet clip score of learned model std (~↓)
119
+ - type: clip
120
+ value: 27.552839279174805
121
+ name: RetainSet clip score of unlearned model mean (↑)
122
+ - type: clip
123
+ value: 4.069867281608924
124
+ name: RetainSet clip score of unlearned model std (~↓)
125
+ - type: clip
126
+ value: -5.3245038986206055
127
+ name: RetainSet clip score difference between learned and unlearned mean (↓)
128
+ - type: clip
129
+ value: 3.8257833841662636
130
+ name: RetainSet clip score difference between learned and unlearned std (~↓)
131
+ - type: clip
132
+ value: 3.1577138900756836
133
+ name: RetainSet clip score difference between original and unlearned mean (↓)
134
+ - type: clip
135
+ value: 3.5168765994475355
136
+ name: RetainSet clip score difference between original and unlearned std (~↓)
137
+ - type: clip
138
+ value: 8.482217788696289
139
+ name: RetainSet clip score difference between original and learned mean (↑)
140
+ - type: clip
141
+ value: 1.448862558004187
142
+ name: RetainSet clip score difference between original and learned std (~↓)
143
+ - type: runtime
144
+ value: 7.08506174882253
145
+ name: Inference latency seconds mean (↓)
146
+ - type: runtime
147
+ value: 0.1205052687084748
148
+ name: Inference latency seconds std (~↓)
149
+ - task:
150
+ type: text-to-image
151
+ dataset:
152
+ name: assets/datasets/lfw_splits_filtered/Carlos_Menem/train_forget (forget)
153
+ and assets/datasets/lfw_splits_filtered/Carlos_Menem/train_retain (retain)
154
+ sets
155
+ type: forget-and-retain-together
156
+ metrics:
157
+ - type: runtime
158
+ value: 3.787001132965088
159
+ name: Runtime init seconds (~↓)
160
+ - type: runtime
161
+ value: 30.71293592453003
162
+ name: Runtime data loading seconds (~↓)
163
+ - type: runtime
164
+ value: 1396.6229717731476
165
+ name: Runtime training seconds (↓)
166
+ - type: runtime
167
+ value: 182.4927041530609
168
+ name: Runtime eval seconds (~↓)
169
+ - type: memory
170
+ value: 0
171
+ name: Peak memory usage in training (~↓)
172
+ ---
173
+
174
+ <!-- This model card has been generated automatically according to the information the training script had access to. You
175
+ should probably proofread and complete it, then remove this comment. -->
176
+
177
+
178
+ # LoRA text2image fine-tuning - None
179
+ These are LoRA adaptation weights for CompVis/stable-diffusion-v1-4.
180
+ The weights were fine-tuned for forgetting assets/datasets/lfw_splits_filtered/Carlos_Menem/train_forget dataset, while retaining assets/datasets/lfw_splits_filtered/Carlos_Menem/train_retain.
181
+ You can find some example images below.
182
+
183
+ ![img](images/val_prompt_00_01.png)
184
+ ![img](images/tst_prompt_199_01.png)
185
+ ![img](images/Forget - An image of Carlos Menem.png)
186
+ ![img](images/Forget - Photograph of Carlos Menem; high definition.png)
187
+ ![img](images/Forget - An picture of Carlos Menem in the rain.png)
188
+ ![img](images/Forget - An picture of Carlos Menem running.png)
189
+ ![img](images/Retain - An image of a child.png)
190
+ ![img](images/Retain - Photograph of a child; high definition.png)
191
+ ![img](images/Retain - An picture of a child in the rain.png)
192
+ ![img](images/Retain - An picture of a child running.png)
193
+
194
+
195
+
196
+ ## Intended uses & limitations
197
+
198
+ #### How to use
199
+
200
+ ```python
201
+ # TODO: add an example code snippet for running this diffusion pipeline
202
+ ```
203
+
204
+ #### Limitations and bias
205
+
206
+ [TODO: provide examples of latent issues and potential remediations]
207
+
208
+ ## Training details
209
+
210
+ [TODO: describe the data used to train the model]
models/people_Carlos_Menem_munba_200/images/Forget - An image of Carlos Menem.png ADDED

Git LFS Details

  • SHA256: b7782bae3c9507aed2f2419cd9e6dafea8874eab2190411fc79429bb8e7e96f0
  • Pointer size: 131 Bytes
  • Size of remote file: 625 kB
models/people_Carlos_Menem_munba_200/images/Forget - An picture of Carlos Menem in the rain.png ADDED

Git LFS Details

  • SHA256: e8a166310935660214b1943f48adf24edc9ba60d8ed7145d8ce236b08357a2af
  • Pointer size: 131 Bytes
  • Size of remote file: 768 kB
models/people_Carlos_Menem_munba_200/images/Forget - An picture of Carlos Menem running.png ADDED

Git LFS Details

  • SHA256: e7ce2ae320ccef22a1552d3a001ac50ee2cd2fc5fa3b589ff020e79d3a85e84e
  • Pointer size: 131 Bytes
  • Size of remote file: 653 kB
models/people_Carlos_Menem_munba_200/images/Forget - Photograph of Carlos Menem; high definition.png ADDED

Git LFS Details

  • SHA256: 57ca23f85209861c68f2d8a341ae80b46b12b1ab94a0073e682649b5fefead08
  • Pointer size: 131 Bytes
  • Size of remote file: 619 kB
models/people_Carlos_Menem_munba_200/images/Retain - An image of a child.png ADDED

Git LFS Details

  • SHA256: 378828042297e782cd1d1f18d722c35a6c3b62c313c3bf941c76d30e738d25ad
  • Pointer size: 131 Bytes
  • Size of remote file: 714 kB
models/people_Carlos_Menem_munba_200/images/Retain - An picture of a child in the rain.png ADDED

Git LFS Details

  • SHA256: e0ce2a594dfdae88a0412808d21ce48ec47b716e1f981d8e11a0ac8fe61b3d0c
  • Pointer size: 131 Bytes
  • Size of remote file: 800 kB
models/people_Carlos_Menem_munba_200/images/Retain - An picture of a child running.png ADDED

Git LFS Details

  • SHA256: 0ae79d93ca395d1e08156a964e26f942dfa3b59b9b481e62625ecfe6e7df83e5
  • Pointer size: 131 Bytes
  • Size of remote file: 750 kB
models/people_Carlos_Menem_munba_200/images/Retain - Photograph of a child; high definition.png ADDED

Git LFS Details

  • SHA256: 80045ce80caaab173aaab64cd049c697ed8e5541431a6b2702d086e7f54be261
  • Pointer size: 131 Bytes
  • Size of remote file: 871 kB
models/people_Carlos_Menem_munba_200/images/tst_prompt_199_01.png ADDED

Git LFS Details

  • SHA256: 4f3d0e724c8a464d97f2064fc052a0dd6f11f0f612533d088c2931ef2b0be054
  • Pointer size: 131 Bytes
  • Size of remote file: 500 kB
models/people_Carlos_Menem_munba_200/images/val_prompt_00_01.png ADDED

Git LFS Details

  • SHA256: e4fda05f77d82a060a9e9bc511969c684718308cd9cf61712970ca61acfdf040
  • Pointer size: 131 Bytes
  • Size of remote file: 440 kB
models/people_Carlos_Menem_munba_200/logs/text2image-fine-tune/1771450156.052087/events.out.tfevents.1771450156.erdos.cl.itk.ppke.hu.3328580.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c652c59e8f90e156f6b37c25d93dc5c10971d4e869053dfb9629df61004c778a
3
+ size 2916
models/people_Carlos_Menem_munba_200/logs/text2image-fine-tune/1771450156.0560246/hparams.yml ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adam_beta1: 0.9
2
+ adam_beta2: 0.999
3
+ adam_epsilon: 1.0e-08
4
+ adam_weight_decay: 0.01
5
+ allow_tf32: false
6
+ cache_dir: null
7
+ caption_column: text
8
+ center_crop: false
9
+ checkpointing_steps: 10000
10
+ checkpoints_total_limit: null
11
+ compute_gradient_conflict: false
12
+ compute_memory: true
13
+ compute_runtimes: true
14
+ dataloader_num_workers: 2
15
+ dataset_forget_config_name: null
16
+ dataset_forget_name: assets/datasets/lfw_splits_filtered/Carlos_Menem/train_forget
17
+ dataset_retain_config_name: null
18
+ dataset_retain_name: assets/datasets/lfw_splits_filtered/Carlos_Menem/train_retain
19
+ device: cuda
20
+ enable_xformers_memory_efficient_attention: false
21
+ gradient_accumulation_steps: 2
22
+ gradient_checkpointing: false
23
+ hub_model_id: null
24
+ hub_token: null
25
+ image_column: image
26
+ is_lora_negated: true
27
+ learning_rate: 0.0006
28
+ local_rank: -1
29
+ logging_dir: logs
30
+ logging_steps: 20
31
+ lora_alpha: 4
32
+ lora_r: 16
33
+ lr_scheduler_type: constant
34
+ lr_warmup_steps: 0
35
+ max_grad_norm: 5.0
36
+ max_train_samples: null
37
+ max_train_steps: 400
38
+ mixed_precision: 'no'
39
+ model_name_or_path: CompVis/stable-diffusion-v1-4
40
+ n_gpu: 1
41
+ noise_offset: 0.0
42
+ num_train_epochs: 200
43
+ num_validation_images: 1
44
+ output_dir: assets/models/people_Carlos_Menem_munba_200
45
+ per_device_train_batch_size: 2
46
+ prediction_type: null
47
+ random_flip: true
48
+ report_to: tensorboard
49
+ resolution: 512
50
+ resume_from_checkpoint: null
51
+ revision: null
52
+ save_strategy: epoch
53
+ save_total_limit: 2
54
+ seed: 42
55
+ should_log: true
56
+ use_8bit_adam: false
57
+ validation_epochs: 201
58
+ validation_prompt: An image of Carlos Menem
59
+ variant: null
models/people_Carlos_Menem_munba_200/logs/text2image-fine-tune/events.out.tfevents.1771450156.erdos.cl.itk.ppke.hu.3328580.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ccb4a62e5c9ebd9cc1e3bf384188e6056c74dadbb658a755d98745a47f2a912
3
+ size 984842
models/people_Carlos_Menem_munba_200/pytorch_lora_weights.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af11d464dd040fbbc28fc2c8eec21db53c0e5657bee1a6ee41832f93b274c94a
3
+ size 12792952